Diffstat (limited to 'deps/openssl/openssl/crypto/ec/asm')
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv4.pl        4
-rw-r--r--  deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv8.pl      311
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-avx2.pl        42
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-ppc64.pl     2382
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-sparcv9.pl      8
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86.pl          4
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86_64.pl    1874
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/x25519-ppc64.pl            824
-rwxr-xr-x  deps/openssl/openssl/crypto/ec/asm/x25519-x86_64.pl          1117
9 files changed, 6396 insertions, 170 deletions
diff --git a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv4.pl b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv4.pl
index 4eb4c68977..83abbdd895 100755
--- a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv4.pl
+++ b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv4.pl
@@ -233,7 +233,7 @@ __ecp_nistz256_add:
@ if a+b >= modulus, subtract modulus.
@
@ But since comparison implies subtraction, we subtract
- @ modulus and then add it back if subraction borrowed.
+ @ modulus and then add it back if subtraction borrowed.
subs $a0,$a0,#-1
sbcs $a1,$a1,#-1
@@ -1222,7 +1222,7 @@ __ecp_nistz256_add_self:
@ if a+b >= modulus, subtract modulus.
@
@ But since comparison implies subtraction, we subtract
- @ modulus and then add it back if subraction borrowed.
+ @ modulus and then add it back if subtraction borrowed.
subs $a0,$a0,#-1
sbcs $a1,$a1,#-1
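The comment fixed in both hunks describes the branch-free reduction used throughout these modules: after computing a+b, the code unconditionally subtracts the modulus and then adds it back if the subtraction borrowed, instead of branching on a comparison. A value-level sketch of that pattern, assuming both inputs are already reduced below the modulus (plain Perl with Math::BigInt; the p256_add helper name is illustrative only and not part of the patch):

    #!/usr/bin/env perl
    use strict;
    use warnings;
    use Math::BigInt;

    # P-256 prime: 2^256 - 2^224 + 2^192 + 2^96 - 1
    my $p = Math::BigInt->new(
        "0xffffffff00000001000000000000000000000000ffffffffffffffffffffffff");

    # Mirrors __ecp_nistz256_add: a,b < p, so a+b < 2p and at most one
    # subtraction of p is needed; the subtraction doubles as the comparison.
    sub p256_add {
        my ($a, $b) = @_;
        my $sum = $a + $b;
        my $t   = $sum - $p;              # "comparison implies subtraction"
        return $t->is_neg ? $sum : $t;    # add p back if subtraction borrowed
    }

    my $x = $p - 5;
    my $y = Math::BigInt->new(7);
    print p256_add($x, $y), "\n";         # 2, i.e. (p-5+7) mod p

The assembly performs the same steps on eight 32-bit (ARMv4) or four 64-bit limbs, using the carry flag to detect the borrow.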
diff --git a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv8.pl b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv8.pl
index 2a39675bfd..1361cb395f 100644
--- a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv8.pl
+++ b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-armv8.pl
@@ -22,11 +22,10 @@
# http://eprint.iacr.org/2013/816.
#
# with/without -DECP_NISTZ256_ASM
-# Apple A7 +120-360%
-# Cortex-A53 +120-400%
-# Cortex-A57 +120-350%
-# X-Gene +200-330%
-# Denver +140-400%
+# Apple A7 +190-360%
+# Cortex-A53 +190-400%
+# Cortex-A57 +190-350%
+# Denver +230-400%
#
# Ranges denote minimum and maximum improvement coefficients depending
# on benchmark. Lower coefficients are for ECDSA sign, server-side
@@ -109,6 +108,10 @@ $code.=<<___;
.quad 0x0000000000000001,0xffffffff00000000,0xffffffffffffffff,0x00000000fffffffe
.Lone:
.quad 1,0,0,0
+.Lord:
+.quad 0xf3b9cac2fc632551,0xbce6faada7179e84,0xffffffffffffffff,0xffffffff00000000
+.LordK:
+.quad 0xccd1c8aaee00bc4f
.asciz "ECP_NISTZ256 for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
// void ecp_nistz256_to_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
@@ -660,7 +663,7 @@ __ecp_nistz256_div_by_2:
adc $ap,xzr,xzr // zap $ap
tst $acc0,#1 // is a even?
- csel $acc0,$acc0,$t0,eq // ret = even ? a : a+modulus
+ csel $acc0,$acc0,$t0,eq // ret = even ? a : a+modulus
csel $acc1,$acc1,$t1,eq
csel $acc2,$acc2,$t2,eq
csel $acc3,$acc3,$t3,eq
@@ -1309,6 +1312,302 @@ $code.=<<___;
ret
.size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
___
+}
+if (1) {
+my ($ord0,$ord1) = ($poly1,$poly3);
+my ($ord2,$ord3,$ordk,$t4) = map("x$_",(21..24));
+my $acc7 = $bi;
+
+$code.=<<___;
+////////////////////////////////////////////////////////////////////////
+// void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
+// uint64_t b[4]);
+.globl ecp_nistz256_ord_mul_mont
+.type ecp_nistz256_ord_mul_mont,%function
+.align 4
+ecp_nistz256_ord_mul_mont:
+ stp x29,x30,[sp,#-64]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+
+ adr $ordk,.Lord
+ ldr $bi,[$bp] // bp[0]
+ ldp $a0,$a1,[$ap]
+ ldp $a2,$a3,[$ap,#16]
+
+ ldp $ord0,$ord1,[$ordk,#0]
+ ldp $ord2,$ord3,[$ordk,#16]
+ ldr $ordk,[$ordk,#32]
+
+ mul $acc0,$a0,$bi // a[0]*b[0]
+ umulh $t0,$a0,$bi
+
+ mul $acc1,$a1,$bi // a[1]*b[0]
+ umulh $t1,$a1,$bi
+
+ mul $acc2,$a2,$bi // a[2]*b[0]
+ umulh $t2,$a2,$bi
+
+ mul $acc3,$a3,$bi // a[3]*b[0]
+ umulh $acc4,$a3,$bi
+
+ mul $t4,$acc0,$ordk
+
+ adds $acc1,$acc1,$t0 // accumulate high parts of multiplication
+ adcs $acc2,$acc2,$t1
+ adcs $acc3,$acc3,$t2
+ adc $acc4,$acc4,xzr
+ mov $acc5,xzr
+___
+for ($i=1;$i<4;$i++) {
+ ################################################################
+ # ffff0000.ffffffff.yyyyyyyy.zzzzzzzz
+ # * abcdefgh
+ # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
+ #
+ # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
+ # rewrite above as:
+ #
+ # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
+ # - 0000abcd.efgh0000.abcdefgh.00000000.00000000
+ # + abcdefgh.abcdefgh.yzayzbyz.cyzdyzey.zfyzgyzh
+$code.=<<___;
+ ldr $bi,[$bp,#8*$i] // b[i]
+
+ lsl $t0,$t4,#32
+ subs $acc2,$acc2,$t4
+ lsr $t1,$t4,#32
+ sbcs $acc3,$acc3,$t0
+ sbcs $acc4,$acc4,$t1
+ sbc $acc5,$acc5,xzr
+
+ subs xzr,$acc0,#1
+ umulh $t1,$ord0,$t4
+ mul $t2,$ord1,$t4
+ umulh $t3,$ord1,$t4
+
+ adcs $t2,$t2,$t1
+ mul $t0,$a0,$bi
+ adc $t3,$t3,xzr
+ mul $t1,$a1,$bi
+
+ adds $acc0,$acc1,$t2
+ mul $t2,$a2,$bi
+ adcs $acc1,$acc2,$t3
+ mul $t3,$a3,$bi
+ adcs $acc2,$acc3,$t4
+ adcs $acc3,$acc4,$t4
+ adc $acc4,$acc5,xzr
+
+ adds $acc0,$acc0,$t0 // accumulate low parts
+ umulh $t0,$a0,$bi
+ adcs $acc1,$acc1,$t1
+ umulh $t1,$a1,$bi
+ adcs $acc2,$acc2,$t2
+ umulh $t2,$a2,$bi
+ adcs $acc3,$acc3,$t3
+ umulh $t3,$a3,$bi
+ adc $acc4,$acc4,xzr
+ mul $t4,$acc0,$ordk
+ adds $acc1,$acc1,$t0 // accumulate high parts
+ adcs $acc2,$acc2,$t1
+ adcs $acc3,$acc3,$t2
+ adcs $acc4,$acc4,$t3
+ adc $acc5,xzr,xzr
+___
+}
+$code.=<<___;
+ lsl $t0,$t4,#32 // last reduction
+ subs $acc2,$acc2,$t4
+ lsr $t1,$t4,#32
+ sbcs $acc3,$acc3,$t0
+ sbcs $acc4,$acc4,$t1
+ sbc $acc5,$acc5,xzr
+
+ subs xzr,$acc0,#1
+ umulh $t1,$ord0,$t4
+ mul $t2,$ord1,$t4
+ umulh $t3,$ord1,$t4
+
+ adcs $t2,$t2,$t1
+ adc $t3,$t3,xzr
+
+ adds $acc0,$acc1,$t2
+ adcs $acc1,$acc2,$t3
+ adcs $acc2,$acc3,$t4
+ adcs $acc3,$acc4,$t4
+ adc $acc4,$acc5,xzr
+
+ subs $t0,$acc0,$ord0 // ret -= modulus
+ sbcs $t1,$acc1,$ord1
+ sbcs $t2,$acc2,$ord2
+ sbcs $t3,$acc3,$ord3
+ sbcs xzr,$acc4,xzr
+
+ csel $acc0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
+ csel $acc1,$acc1,$t1,lo
+ csel $acc2,$acc2,$t2,lo
+ stp $acc0,$acc1,[$rp]
+ csel $acc3,$acc3,$t3,lo
+ stp $acc2,$acc3,[$rp,#16]
+
+ ldp x19,x20,[sp,#16]
+ ldp x21,x22,[sp,#32]
+ ldp x23,x24,[sp,#48]
+ ldr x29,[sp],#64
+ ret
+.size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
+
+////////////////////////////////////////////////////////////////////////
+// void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
+// int rep);
+.globl ecp_nistz256_ord_sqr_mont
+.type ecp_nistz256_ord_sqr_mont,%function
+.align 4
+ecp_nistz256_ord_sqr_mont:
+ stp x29,x30,[sp,#-64]!
+ add x29,sp,#0
+ stp x19,x20,[sp,#16]
+ stp x21,x22,[sp,#32]
+ stp x23,x24,[sp,#48]
+
+ adr $ordk,.Lord
+ ldp $a0,$a1,[$ap]
+ ldp $a2,$a3,[$ap,#16]
+
+ ldp $ord0,$ord1,[$ordk,#0]
+ ldp $ord2,$ord3,[$ordk,#16]
+ ldr $ordk,[$ordk,#32]
+ b .Loop_ord_sqr
+
+.align 4
+.Loop_ord_sqr:
+ sub $bp,$bp,#1
+ ////////////////////////////////////////////////////////////////
+ // | | | | | |a1*a0| |
+ // | | | | |a2*a0| | |
+ // | |a3*a2|a3*a0| | | |
+ // | | | |a2*a1| | | |
+ // | | |a3*a1| | | | |
+ // *| | | | | | | | 2|
+ // +|a3*a3|a2*a2|a1*a1|a0*a0|
+ // |--+--+--+--+--+--+--+--|
+ // |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
+ //
+ // "can't overflow" below mark carrying into high part of
+ // multiplication result, which can't overflow, because it
+ // can never be all ones.
+
+ mul $acc1,$a1,$a0 // a[1]*a[0]
+ umulh $t1,$a1,$a0
+ mul $acc2,$a2,$a0 // a[2]*a[0]
+ umulh $t2,$a2,$a0
+ mul $acc3,$a3,$a0 // a[3]*a[0]
+ umulh $acc4,$a3,$a0
+
+ adds $acc2,$acc2,$t1 // accumulate high parts of multiplication
+ mul $t0,$a2,$a1 // a[2]*a[1]
+ umulh $t1,$a2,$a1
+ adcs $acc3,$acc3,$t2
+ mul $t2,$a3,$a1 // a[3]*a[1]
+ umulh $t3,$a3,$a1
+ adc $acc4,$acc4,xzr // can't overflow
+
+ mul $acc5,$a3,$a2 // a[3]*a[2]
+ umulh $acc6,$a3,$a2
+
+ adds $t1,$t1,$t2 // accumulate high parts of multiplication
+ mul $acc0,$a0,$a0 // a[0]*a[0]
+ adc $t2,$t3,xzr // can't overflow
+
+ adds $acc3,$acc3,$t0 // accumulate low parts of multiplication
+ umulh $a0,$a0,$a0
+ adcs $acc4,$acc4,$t1
+ mul $t1,$a1,$a1 // a[1]*a[1]
+ adcs $acc5,$acc5,$t2
+ umulh $a1,$a1,$a1
+ adc $acc6,$acc6,xzr // can't overflow
+
+ adds $acc1,$acc1,$acc1 // acc[1-6]*=2
+ mul $t2,$a2,$a2 // a[2]*a[2]
+ adcs $acc2,$acc2,$acc2
+ umulh $a2,$a2,$a2
+ adcs $acc3,$acc3,$acc3
+ mul $t3,$a3,$a3 // a[3]*a[3]
+ adcs $acc4,$acc4,$acc4
+ umulh $a3,$a3,$a3
+ adcs $acc5,$acc5,$acc5
+ adcs $acc6,$acc6,$acc6
+ adc $acc7,xzr,xzr
+
+ adds $acc1,$acc1,$a0 // +a[i]*a[i]
+ mul $t4,$acc0,$ordk
+ adcs $acc2,$acc2,$t1
+ adcs $acc3,$acc3,$a1
+ adcs $acc4,$acc4,$t2
+ adcs $acc5,$acc5,$a2
+ adcs $acc6,$acc6,$t3
+ adc $acc7,$acc7,$a3
+___
+for($i=0; $i<4; $i++) { # reductions
+$code.=<<___;
+ subs xzr,$acc0,#1
+ umulh $t1,$ord0,$t4
+ mul $t2,$ord1,$t4
+ umulh $t3,$ord1,$t4
+
+ adcs $t2,$t2,$t1
+ adc $t3,$t3,xzr
+
+ adds $acc0,$acc1,$t2
+ adcs $acc1,$acc2,$t3
+ adcs $acc2,$acc3,$t4
+ adc $acc3,xzr,$t4 // can't overflow
+___
+$code.=<<___ if ($i<3);
+ mul $t3,$acc0,$ordk
+___
+$code.=<<___;
+ lsl $t0,$t4,#32
+ subs $acc1,$acc1,$t4
+ lsr $t1,$t4,#32
+ sbcs $acc2,$acc2,$t0
+ sbc $acc3,$acc3,$t1 // can't borrow
+___
+ ($t3,$t4) = ($t4,$t3);
+}
+$code.=<<___;
+ adds $acc0,$acc0,$acc4 // accumulate upper half
+ adcs $acc1,$acc1,$acc5
+ adcs $acc2,$acc2,$acc6
+ adcs $acc3,$acc3,$acc7
+ adc $acc4,xzr,xzr
+
+ subs $t0,$acc0,$ord0 // ret -= modulus
+ sbcs $t1,$acc1,$ord1
+ sbcs $t2,$acc2,$ord2
+ sbcs $t3,$acc3,$ord3
+ sbcs xzr,$acc4,xzr
+
+ csel $a0,$acc0,$t0,lo // ret = borrow ? ret : ret-modulus
+ csel $a1,$acc1,$t1,lo
+ csel $a2,$acc2,$t2,lo
+ csel $a3,$acc3,$t3,lo
+
+ cbnz $bp,.Loop_ord_sqr
+
+ stp $a0,$a1,[$rp]
+ stp $a2,$a3,[$rp,#16]
+
+ ldp x19,x20,[sp,#16]
+ ldp x21,x22,[sp,#32]
+ ldp x23,x24,[sp,#48]
+ ldr x29,[sp],#64
+ ret
+.size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
+___
} }
########################################################################
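The ecp_nistz256_ord_mul_mont added above is a word-by-word Montgomery multiplication modulo the group order, with .LordK (0xccd1c8aaee00bc4f) acting as -ord^-1 mod 2^64; ecp_nistz256_ord_sqr_mont repeats the same reduction after a squaring. A value-level model of that schedule, assuming a, b < ord (plain Perl with Math::BigInt, for illustration only; the real code keeps everything in 64-bit limbs with explicit carries):

    #!/usr/bin/env perl
    use strict;
    use warnings;
    use Math::BigInt;

    # .Lord and .LordK from the patch: the group order and -ord^-1 mod 2^64
    my $ord  = Math::BigInt->new(
        "0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551");
    my $ordk = Math::BigInt->new("0xccd1c8aaee00bc4f");
    my $mask = (Math::BigInt->new(1) << 64) - 1;

    # Returns a*b*2^-256 mod ord.
    sub ord_mul_mont {
        my ($a, $b) = @_;
        my $acc = Math::BigInt->new(0);
        for my $i (0 .. 3) {
            my $bi = ($b >> (64 * $i)) & $mask;          # b[i]
            $acc += $a * $bi;                            # acc += a*b[i]
            my $t4 = (($acc & $mask) * $ordk) & $mask;   # "magic" digit
            $acc += $ord * $t4;                          # low limb becomes zero
            $acc >>= 64;                                 # ... and is dropped
        }
        $acc -= $ord if $acc >= $ord;                    # final conditional subtract
        return $acc;
    }

    # self-check against schoolbook modular arithmetic
    my $a = Math::BigInt->new("0x123456789abcdef");
    my $b = Math::BigInt->new("0xfedcba9876543210");
    my $R = Math::BigInt->new(1) << 256;
    my $expect = ($a * $b * $R->copy->bmodinv($ord)) % $ord;
    print ord_mul_mont($a, $b) == $expect ? "ok\n" : "mismatch\n";

With a, b < ord the accumulator stays below 2*ord after the last shift, so the single conditional subtraction at the end (the csel-on-borrow sequence in the assembly) fully reduces the result.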
diff --git a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-avx2.pl b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-avx2.pl
index edd7d01281..794e56a082 100755
--- a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-avx2.pl
+++ b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-avx2.pl
@@ -1,39 +1,19 @@
#! /usr/bin/env perl
# Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.
+# Copyright (c) 2014, Intel Corporation. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
-
-
-##############################################################################
-# #
-# Copyright 2014 Intel Corporation #
-# #
-# Licensed under the Apache License, Version 2.0 (the "License"); #
-# you may not use this file except in compliance with the License. #
-# You may obtain a copy of the License at #
-# #
-# http://www.apache.org/licenses/LICENSE-2.0 #
-# #
-# Unless required by applicable law or agreed to in writing, software #
-# distributed under the License is distributed on an "AS IS" BASIS, #
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
-# See the License for the specific language governing permissions and #
-# limitations under the License. #
-# #
-##############################################################################
-# #
-# Developers and authors: #
-# Shay Gueron (1, 2), and Vlad Krasnov (1) #
-# (1) Intel Corporation, Israel Development Center #
-# (2) University of Haifa #
-# Reference: #
-# S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with#
-# 256 Bit Primes" #
-# #
-##############################################################################
+#
+# Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1)
+# (1) Intel Corporation, Israel Development Center, Haifa, Israel
+# (2) University of Haifa, Israel
+#
+# Reference:
+# S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
+# 256 Bit Primes"
$flavour = shift;
$output = shift;
@@ -157,7 +137,7 @@ ___
{
# This function receives a pointer to an array of four affine points
-# (X, Y, <1>) and rearanges the data for AVX2 execution, while
+# (X, Y, <1>) and rearranges the data for AVX2 execution, while
# converting it to 2^29 radix redundant form
my ($X0,$X1,$X2,$X3, $Y0,$Y1,$Y2,$Y3,
@@ -309,7 +289,7 @@ ___
{
################################################################################
# This function receives a pointer to an array of four AVX2 formatted points
-# (X, Y, Z) convert the data to normal representation, and rearanges the data
+# (X, Y, Z) convert the data to normal representation, and rearranges the data
my ($D0,$D1,$D2,$D3, $D4,$D5,$D6,$D7, $D8)=map("%ymm$_",(0..8));
my ($T0,$T1,$T2,$T3, $T4,$T5,$T6)=map("%ymm$_",(9..15));
diff --git a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-ppc64.pl b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-ppc64.pl
new file mode 100755
index 0000000000..984c7f2050
--- /dev/null
+++ b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-ppc64.pl
@@ -0,0 +1,2382 @@
+#! /usr/bin/env perl
+# Copyright 2016-2018 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# ECP_NISTZ256 module for PPC64.
+#
+# August 2016.
+#
+# Original ECP_NISTZ256 submission targeting x86_64 is detailed in
+# http://eprint.iacr.org/2013/816.
+#
+# with/without -DECP_NISTZ256_ASM
+# POWER7 +260-530%
+# POWER8 +220-340%
+
+$flavour = shift;
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+my $sp="r1";
+
+{
+my ($rp,$ap,$bp,$bi,$acc0,$acc1,$acc2,$acc3,$poly1,$poly3,
+ $acc4,$acc5,$a0,$a1,$a2,$a3,$t0,$t1,$t2,$t3) =
+ map("r$_",(3..12,22..31));
+
+my ($acc6,$acc7)=($bp,$bi); # used in __ecp_nistz256_sqr_mont
+
+$code.=<<___;
+.machine "any"
+.text
+___
+########################################################################
+# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
+#
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+open TABLE,"<ecp_nistz256_table.c" or
+open TABLE,"<${dir}../ecp_nistz256_table.c" or
+die "failed to open ecp_nistz256_table.c:",$!;
+
+use integer;
+
+foreach(<TABLE>) {
+ s/TOBN\(\s*(0x[0-9a-f]+),\s*(0x[0-9a-f]+)\s*\)/push @arr,hex($2),hex($1)/geo;
+}
+close TABLE;
+
+# See ecp_nistz256_table.c for explanation for why it's 64*16*37.
+# 64*16*37-1 is because $#arr returns last valid index or @arr, not
+# amount of elements.
+die "insane number of elements" if ($#arr != 64*16*37-1);
+
+$code.=<<___;
+.type ecp_nistz256_precomputed,\@object
+.globl ecp_nistz256_precomputed
+.align 12
+ecp_nistz256_precomputed:
+___
+########################################################################
+# this conversion smashes P256_POINT_AFFINE by individual bytes with
+# 64 byte interval, similar to
+# 1111222233334444
+# 1234123412341234
+for(1..37) {
+ @tbl = splice(@arr,0,64*16);
+ for($i=0;$i<64;$i++) {
+ undef @line;
+ for($j=0;$j<64;$j++) {
+ push @line,(@tbl[$j*16+$i/4]>>(($i%4)*8))&0xff;
+ }
+ $code.=".byte\t";
+ $code.=join(',',map { sprintf "0x%02x",$_} @line);
+ $code.="\n";
+ }
+}
+
+$code.=<<___;
+.size ecp_nistz256_precomputed,.-ecp_nistz256_precomputed
+.asciz "ECP_NISTZ256 for PPC64, CRYPTOGAMS by <appro\@openssl.org>"
+
+# void ecp_nistz256_mul_mont(BN_ULONG x0[4],const BN_ULONG x1[4],
+# const BN_ULONG x2[4]);
+.globl ecp_nistz256_mul_mont
+.align 5
+ecp_nistz256_mul_mont:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r22,48($sp)
+ std r23,56($sp)
+ std r24,64($sp)
+ std r25,72($sp)
+ std r26,80($sp)
+ std r27,88($sp)
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ ld $a0,0($ap)
+ ld $bi,0($bp)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_mul_mont
+
+ mtlr r0
+ ld r22,48($sp)
+ ld r23,56($sp)
+ ld r24,64($sp)
+ ld r25,72($sp)
+ ld r26,80($sp)
+ ld r27,88($sp)
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,10,3,0
+ .long 0
+.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
+
+# void ecp_nistz256_sqr_mont(BN_ULONG x0[4],const BN_ULONG x1[4]);
+.globl ecp_nistz256_sqr_mont
+.align 4
+ecp_nistz256_sqr_mont:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r22,48($sp)
+ std r23,56($sp)
+ std r24,64($sp)
+ std r25,72($sp)
+ std r26,80($sp)
+ std r27,88($sp)
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ ld $a0,0($ap)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_sqr_mont
+
+ mtlr r0
+ ld r22,48($sp)
+ ld r23,56($sp)
+ ld r24,64($sp)
+ ld r25,72($sp)
+ ld r26,80($sp)
+ ld r27,88($sp)
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,10,2,0
+ .long 0
+.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
+
+# void ecp_nistz256_add(BN_ULONG x0[4],const BN_ULONG x1[4],
+# const BN_ULONG x2[4]);
+.globl ecp_nistz256_add
+.align 4
+ecp_nistz256_add:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ ld $acc0,0($ap)
+ ld $t0, 0($bp)
+ ld $acc1,8($ap)
+ ld $t1, 8($bp)
+ ld $acc2,16($ap)
+ ld $t2, 16($bp)
+ ld $acc3,24($ap)
+ ld $t3, 24($bp)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_add
+
+ mtlr r0
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,4,3,0
+ .long 0
+.size ecp_nistz256_add,.-ecp_nistz256_add
+
+# void ecp_nistz256_div_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
+.globl ecp_nistz256_div_by_2
+.align 4
+ecp_nistz256_div_by_2:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ ld $acc0,0($ap)
+ ld $acc1,8($ap)
+ ld $acc2,16($ap)
+ ld $acc3,24($ap)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_div_by_2
+
+ mtlr r0
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,4,2,0
+ .long 0
+.size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
+
+# void ecp_nistz256_mul_by_2(BN_ULONG x0[4],const BN_ULONG x1[4]);
+.globl ecp_nistz256_mul_by_2
+.align 4
+ecp_nistz256_mul_by_2:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ ld $acc0,0($ap)
+ ld $acc1,8($ap)
+ ld $acc2,16($ap)
+ ld $acc3,24($ap)
+
+ mr $t0,$acc0
+ mr $t1,$acc1
+ mr $t2,$acc2
+ mr $t3,$acc3
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_add # ret = a+a // 2*a
+
+ mtlr r0
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,4,3,0
+ .long 0
+.size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
+
+# void ecp_nistz256_mul_by_3(BN_ULONG x0[4],const BN_ULONG x1[4]);
+.globl ecp_nistz256_mul_by_3
+.align 4
+ecp_nistz256_mul_by_3:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ ld $acc0,0($ap)
+ ld $acc1,8($ap)
+ ld $acc2,16($ap)
+ ld $acc3,24($ap)
+
+ mr $t0,$acc0
+ std $acc0,64($sp)
+ mr $t1,$acc1
+ std $acc1,72($sp)
+ mr $t2,$acc2
+ std $acc2,80($sp)
+ mr $t3,$acc3
+ std $acc3,88($sp)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_add # ret = a+a // 2*a
+
+ ld $t0,64($sp)
+ ld $t1,72($sp)
+ ld $t2,80($sp)
+ ld $t3,88($sp)
+
+ bl __ecp_nistz256_add # ret += a // 2*a+a=3*a
+
+ mtlr r0
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,4,2,0
+ .long 0
+.size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
+
+# void ecp_nistz256_sub(BN_ULONG x0[4],const BN_ULONG x1[4],
+# const BN_ULONG x2[4]);
+.globl ecp_nistz256_sub
+.align 4
+ecp_nistz256_sub:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ ld $acc0,0($ap)
+ ld $acc1,8($ap)
+ ld $acc2,16($ap)
+ ld $acc3,24($ap)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_sub_from
+
+ mtlr r0
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,4,3,0
+ .long 0
+.size ecp_nistz256_sub,.-ecp_nistz256_sub
+
+# void ecp_nistz256_neg(BN_ULONG x0[4],const BN_ULONG x1[4]);
+.globl ecp_nistz256_neg
+.align 4
+ecp_nistz256_neg:
+ stdu $sp,-128($sp)
+ mflr r0
+ std r28,96($sp)
+ std r29,104($sp)
+ std r30,112($sp)
+ std r31,120($sp)
+
+ mr $bp,$ap
+ li $acc0,0
+ li $acc1,0
+ li $acc2,0
+ li $acc3,0
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ bl __ecp_nistz256_sub_from
+
+ mtlr r0
+ ld r28,96($sp)
+ ld r29,104($sp)
+ ld r30,112($sp)
+ ld r31,120($sp)
+ addi $sp,$sp,128
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,4,2,0
+ .long 0
+.size ecp_nistz256_neg,.-ecp_nistz256_neg
+
+# note that __ecp_nistz256_mul_mont expects a[0-3] input pre-loaded
+# to $a0-$a3 and b[0] - to $bi
+.type __ecp_nistz256_mul_mont,\@function
+.align 4
+__ecp_nistz256_mul_mont:
+ mulld $acc0,$a0,$bi # a[0]*b[0]
+ mulhdu $t0,$a0,$bi
+
+ mulld $acc1,$a1,$bi # a[1]*b[0]
+ mulhdu $t1,$a1,$bi
+
+ mulld $acc2,$a2,$bi # a[2]*b[0]
+ mulhdu $t2,$a2,$bi
+
+ mulld $acc3,$a3,$bi # a[3]*b[0]
+ mulhdu $t3,$a3,$bi
+ ld $bi,8($bp) # b[1]
+
+ addc $acc1,$acc1,$t0 # accumulate high parts of multiplication
+ sldi $t0,$acc0,32
+ adde $acc2,$acc2,$t1
+ srdi $t1,$acc0,32
+ adde $acc3,$acc3,$t2
+ addze $acc4,$t3
+ li $acc5,0
+___
+for($i=1;$i<4;$i++) {
+ ################################################################
+ # Reduction iteration is normally performed by accumulating
+ # result of multiplication of modulus by "magic" digit [and
+ # omitting least significant word, which is guaranteed to
+ # be 0], but thanks to special form of modulus and "magic"
+ # digit being equal to least significant word, it can be
+ # performed with additions and subtractions alone. Indeed:
+ #
+ # ffff0001.00000000.0000ffff.ffffffff
+ # * abcdefgh
+ # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
+ #
+ # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
+ # rewrite above as:
+ #
+ # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.abcdefgh
+ # + abcdefgh.abcdefgh.0000abcd.efgh0000.00000000
+ # - 0000abcd.efgh0000.00000000.00000000.abcdefgh
+ #
+ # or marking redundant operations:
+ #
+ # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.--------
+ # + abcdefgh.abcdefgh.0000abcd.efgh0000.--------
+ # - 0000abcd.efgh0000.--------.--------.--------
+
+$code.=<<___;
+ subfc $t2,$t0,$acc0 # "*0xffff0001"
+ subfe $t3,$t1,$acc0
+ addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
+ adde $acc1,$acc2,$t1
+ adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
+ adde $acc3,$acc4,$t3
+ addze $acc4,$acc5
+
+ mulld $t0,$a0,$bi # lo(a[0]*b[i])
+ mulld $t1,$a1,$bi # lo(a[1]*b[i])
+ mulld $t2,$a2,$bi # lo(a[2]*b[i])
+ mulld $t3,$a3,$bi # lo(a[3]*b[i])
+ addc $acc0,$acc0,$t0 # accumulate low parts of multiplication
+ mulhdu $t0,$a0,$bi # hi(a[0]*b[i])
+ adde $acc1,$acc1,$t1
+ mulhdu $t1,$a1,$bi # hi(a[1]*b[i])
+ adde $acc2,$acc2,$t2
+ mulhdu $t2,$a2,$bi # hi(a[2]*b[i])
+ adde $acc3,$acc3,$t3
+ mulhdu $t3,$a3,$bi # hi(a[3]*b[i])
+ addze $acc4,$acc4
+___
+$code.=<<___ if ($i<3);
+ ld $bi,8*($i+1)($bp) # b[$i+1]
+___
+$code.=<<___;
+ addc $acc1,$acc1,$t0 # accumulate high parts of multiplication
+ sldi $t0,$acc0,32
+ adde $acc2,$acc2,$t1
+ srdi $t1,$acc0,32
+ adde $acc3,$acc3,$t2
+ adde $acc4,$acc4,$t3
+ li $acc5,0
+ addze $acc5,$acc5
+___
+}
+$code.=<<___;
+ # last reduction
+ subfc $t2,$t0,$acc0 # "*0xffff0001"
+ subfe $t3,$t1,$acc0
+ addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
+ adde $acc1,$acc2,$t1
+ adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
+ adde $acc3,$acc4,$t3
+ addze $acc4,$acc5
+
+ li $t2,0
+ addic $acc0,$acc0,1 # ret -= modulus
+ subfe $acc1,$poly1,$acc1
+ subfe $acc2,$t2,$acc2
+ subfe $acc3,$poly3,$acc3
+ subfe $acc4,$t2,$acc4
+
+ addc $acc0,$acc0,$acc4 # ret += modulus if borrow
+ and $t1,$poly1,$acc4
+ and $t3,$poly3,$acc4
+ adde $acc1,$acc1,$t1
+ addze $acc2,$acc2
+ adde $acc3,$acc3,$t3
+
+ std $acc0,0($rp)
+ std $acc1,8($rp)
+ std $acc2,16($rp)
+ std $acc3,24($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,1,0
+ .long 0
+.size __ecp_nistz256_mul_mont,.-__ecp_nistz256_mul_mont
+
+# note that __ecp_nistz256_sqr_mont expects a[0-3] input pre-loaded
+# to $a0-$a3
+.type __ecp_nistz256_sqr_mont,\@function
+.align 4
+__ecp_nistz256_sqr_mont:
+ ################################################################
+ # | | | | | |a1*a0| |
+ # | | | | |a2*a0| | |
+ # | |a3*a2|a3*a0| | | |
+ # | | | |a2*a1| | | |
+ # | | |a3*a1| | | | |
+ # *| | | | | | | | 2|
+ # +|a3*a3|a2*a2|a1*a1|a0*a0|
+ # |--+--+--+--+--+--+--+--|
+ # |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
+ #
+ # "can't overflow" below mark carrying into high part of
+ # multiplication result, which can't overflow, because it
+ # can never be all ones.
+
+ mulld $acc1,$a1,$a0 # a[1]*a[0]
+ mulhdu $t1,$a1,$a0
+ mulld $acc2,$a2,$a0 # a[2]*a[0]
+ mulhdu $t2,$a2,$a0
+ mulld $acc3,$a3,$a0 # a[3]*a[0]
+ mulhdu $acc4,$a3,$a0
+
+ addc $acc2,$acc2,$t1 # accumulate high parts of multiplication
+ mulld $t0,$a2,$a1 # a[2]*a[1]
+ mulhdu $t1,$a2,$a1
+ adde $acc3,$acc3,$t2
+ mulld $t2,$a3,$a1 # a[3]*a[1]
+ mulhdu $t3,$a3,$a1
+ addze $acc4,$acc4 # can't overflow
+
+ mulld $acc5,$a3,$a2 # a[3]*a[2]
+ mulhdu $acc6,$a3,$a2
+
+ addc $t1,$t1,$t2 # accumulate high parts of multiplication
+ addze $t2,$t3 # can't overflow
+
+ addc $acc3,$acc3,$t0 # accumulate low parts of multiplication
+ adde $acc4,$acc4,$t1
+ adde $acc5,$acc5,$t2
+ addze $acc6,$acc6 # can't overflow
+
+ addc $acc1,$acc1,$acc1 # acc[1-6]*=2
+ adde $acc2,$acc2,$acc2
+ adde $acc3,$acc3,$acc3
+ adde $acc4,$acc4,$acc4
+ adde $acc5,$acc5,$acc5
+ adde $acc6,$acc6,$acc6
+ li $acc7,0
+ addze $acc7,$acc7
+
+ mulld $acc0,$a0,$a0 # a[0]*a[0]
+ mulhdu $a0,$a0,$a0
+ mulld $t1,$a1,$a1 # a[1]*a[1]
+ mulhdu $a1,$a1,$a1
+ mulld $t2,$a2,$a2 # a[2]*a[2]
+ mulhdu $a2,$a2,$a2
+ mulld $t3,$a3,$a3 # a[3]*a[3]
+ mulhdu $a3,$a3,$a3
+ addc $acc1,$acc1,$a0 # +a[i]*a[i]
+ sldi $t0,$acc0,32
+ adde $acc2,$acc2,$t1
+ srdi $t1,$acc0,32
+ adde $acc3,$acc3,$a1
+ adde $acc4,$acc4,$t2
+ adde $acc5,$acc5,$a2
+ adde $acc6,$acc6,$t3
+ adde $acc7,$acc7,$a3
+___
+for($i=0;$i<3;$i++) { # reductions, see commentary in
+ # multiplication for details
+$code.=<<___;
+ subfc $t2,$t0,$acc0 # "*0xffff0001"
+ subfe $t3,$t1,$acc0
+ addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
+ sldi $t0,$acc0,32
+ adde $acc1,$acc2,$t1
+ srdi $t1,$acc0,32
+ adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
+ addze $acc3,$t3 # can't overflow
+___
+}
+$code.=<<___;
+ subfc $t2,$t0,$acc0 # "*0xffff0001"
+ subfe $t3,$t1,$acc0
+ addc $acc0,$acc1,$t0 # +=acc[0]<<96 and omit acc[0]
+ adde $acc1,$acc2,$t1
+ adde $acc2,$acc3,$t2 # +=acc[0]*0xffff0001
+ addze $acc3,$t3 # can't overflow
+
+ addc $acc0,$acc0,$acc4 # accumulate upper half
+ adde $acc1,$acc1,$acc5
+ adde $acc2,$acc2,$acc6
+ adde $acc3,$acc3,$acc7
+ li $t2,0
+ addze $acc4,$t2
+
+ addic $acc0,$acc0,1 # ret -= modulus
+ subfe $acc1,$poly1,$acc1
+ subfe $acc2,$t2,$acc2
+ subfe $acc3,$poly3,$acc3
+ subfe $acc4,$t2,$acc4
+
+ addc $acc0,$acc0,$acc4 # ret += modulus if borrow
+ and $t1,$poly1,$acc4
+ and $t3,$poly3,$acc4
+ adde $acc1,$acc1,$t1
+ addze $acc2,$acc2
+ adde $acc3,$acc3,$t3
+
+ std $acc0,0($rp)
+ std $acc1,8($rp)
+ std $acc2,16($rp)
+ std $acc3,24($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,1,0
+ .long 0
+.size __ecp_nistz256_sqr_mont,.-__ecp_nistz256_sqr_mont
+
+# Note that __ecp_nistz256_add expects both input vectors pre-loaded to
+# $a0-$a3 and $t0-$t3. This is done because it's used in multiple
+# contexts, e.g. in multiplication by 2 and 3...
+.type __ecp_nistz256_add,\@function
+.align 4
+__ecp_nistz256_add:
+ addc $acc0,$acc0,$t0 # ret = a+b
+ adde $acc1,$acc1,$t1
+ adde $acc2,$acc2,$t2
+ li $t2,0
+ adde $acc3,$acc3,$t3
+ addze $t0,$t2
+
+ # if a+b >= modulus, subtract modulus
+ #
+ # But since comparison implies subtraction, we subtract
+ # modulus and then add it back if subtraction borrowed.
+
+ subic $acc0,$acc0,-1
+ subfe $acc1,$poly1,$acc1
+ subfe $acc2,$t2,$acc2
+ subfe $acc3,$poly3,$acc3
+ subfe $t0,$t2,$t0
+
+ addc $acc0,$acc0,$t0
+ and $t1,$poly1,$t0
+ and $t3,$poly3,$t0
+ adde $acc1,$acc1,$t1
+ addze $acc2,$acc2
+ adde $acc3,$acc3,$t3
+
+ std $acc0,0($rp)
+ std $acc1,8($rp)
+ std $acc2,16($rp)
+ std $acc3,24($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size __ecp_nistz256_add,.-__ecp_nistz256_add
+
+.type __ecp_nistz256_sub_from,\@function
+.align 4
+__ecp_nistz256_sub_from:
+ ld $t0,0($bp)
+ ld $t1,8($bp)
+ ld $t2,16($bp)
+ ld $t3,24($bp)
+ subfc $acc0,$t0,$acc0 # ret = a-b
+ subfe $acc1,$t1,$acc1
+ subfe $acc2,$t2,$acc2
+ subfe $acc3,$t3,$acc3
+ subfe $t0,$t0,$t0 # t0 = borrow ? -1 : 0
+
+ # if a-b borrowed, add modulus
+
+ addc $acc0,$acc0,$t0 # ret -= modulus & t0
+ and $t1,$poly1,$t0
+ and $t3,$poly3,$t0
+ adde $acc1,$acc1,$t1
+ addze $acc2,$acc2
+ adde $acc3,$acc3,$t3
+
+ std $acc0,0($rp)
+ std $acc1,8($rp)
+ std $acc2,16($rp)
+ std $acc3,24($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size __ecp_nistz256_sub_from,.-__ecp_nistz256_sub_from
+
+.type __ecp_nistz256_sub_morf,\@function
+.align 4
+__ecp_nistz256_sub_morf:
+ ld $t0,0($bp)
+ ld $t1,8($bp)
+ ld $t2,16($bp)
+ ld $t3,24($bp)
+ subfc $acc0,$acc0,$t0 # ret = b-a
+ subfe $acc1,$acc1,$t1
+ subfe $acc2,$acc2,$t2
+ subfe $acc3,$acc3,$t3
+ subfe $t0,$t0,$t0 # t0 = borrow ? -1 : 0
+
+ # if b-a borrowed, add modulus
+
+ addc $acc0,$acc0,$t0 # ret -= modulus & t0
+ and $t1,$poly1,$t0
+ and $t3,$poly3,$t0
+ adde $acc1,$acc1,$t1
+ addze $acc2,$acc2
+ adde $acc3,$acc3,$t3
+
+ std $acc0,0($rp)
+ std $acc1,8($rp)
+ std $acc2,16($rp)
+ std $acc3,24($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size __ecp_nistz256_sub_morf,.-__ecp_nistz256_sub_morf
+
+.type __ecp_nistz256_div_by_2,\@function
+.align 4
+__ecp_nistz256_div_by_2:
+ andi. $t0,$acc0,1
+ addic $acc0,$acc0,-1 # a += modulus
+ neg $t0,$t0
+ adde $acc1,$acc1,$poly1
+ not $t0,$t0
+ addze $acc2,$acc2
+ li $t2,0
+ adde $acc3,$acc3,$poly3
+ and $t1,$poly1,$t0
+ addze $ap,$t2 # ap = carry
+ and $t3,$poly3,$t0
+
+ subfc $acc0,$t0,$acc0 # a -= modulus if a was even
+ subfe $acc1,$t1,$acc1
+ subfe $acc2,$t2,$acc2
+ subfe $acc3,$t3,$acc3
+ subfe $ap, $t2,$ap
+
+ srdi $acc0,$acc0,1
+ sldi $t0,$acc1,63
+ srdi $acc1,$acc1,1
+ sldi $t1,$acc2,63
+ srdi $acc2,$acc2,1
+ sldi $t2,$acc3,63
+ srdi $acc3,$acc3,1
+ sldi $t3,$ap,63
+ or $acc0,$acc0,$t0
+ or $acc1,$acc1,$t1
+ or $acc2,$acc2,$t2
+ or $acc3,$acc3,$t3
+
+ std $acc0,0($rp)
+ std $acc1,8($rp)
+ std $acc2,16($rp)
+ std $acc3,24($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,1,0
+ .long 0
+.size __ecp_nistz256_div_by_2,.-__ecp_nistz256_div_by_2
+___
+########################################################################
+# following subroutines are "literal" implementation of those found in
+# ecp_nistz256.c
+#
+########################################################################
+# void ecp_nistz256_point_double(P256_POINT *out,const P256_POINT *inp);
+#
+if (1) {
+my $FRAME=64+32*4+12*8;
+my ($S,$M,$Zsqr,$tmp0)=map(64+32*$_,(0..3));
+# above map() describes stack layout with 4 temporary
+# 256-bit vectors on top.
+my ($rp_real,$ap_real) = map("r$_",(20,21));
+
+$code.=<<___;
+.globl ecp_nistz256_point_double
+.align 5
+ecp_nistz256_point_double:
+ stdu $sp,-$FRAME($sp)
+ mflr r0
+ std r20,$FRAME-8*12($sp)
+ std r21,$FRAME-8*11($sp)
+ std r22,$FRAME-8*10($sp)
+ std r23,$FRAME-8*9($sp)
+ std r24,$FRAME-8*8($sp)
+ std r25,$FRAME-8*7($sp)
+ std r26,$FRAME-8*6($sp)
+ std r27,$FRAME-8*5($sp)
+ std r28,$FRAME-8*4($sp)
+ std r29,$FRAME-8*3($sp)
+ std r30,$FRAME-8*2($sp)
+ std r31,$FRAME-8*1($sp)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+.Ldouble_shortcut:
+ ld $acc0,32($ap)
+ ld $acc1,40($ap)
+ ld $acc2,48($ap)
+ ld $acc3,56($ap)
+ mr $t0,$acc0
+ mr $t1,$acc1
+ mr $t2,$acc2
+ mr $t3,$acc3
+ ld $a0,64($ap) # forward load for p256_sqr_mont
+ ld $a1,72($ap)
+ ld $a2,80($ap)
+ ld $a3,88($ap)
+ mr $rp_real,$rp
+ mr $ap_real,$ap
+ addi $rp,$sp,$S
+ bl __ecp_nistz256_add # p256_mul_by_2(S, in_y);
+
+ addi $rp,$sp,$Zsqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Zsqr, in_z);
+
+ ld $t0,0($ap_real)
+ ld $t1,8($ap_real)
+ ld $t2,16($ap_real)
+ ld $t3,24($ap_real)
+ mr $a0,$acc0 # put Zsqr aside for p256_sub
+ mr $a1,$acc1
+ mr $a2,$acc2
+ mr $a3,$acc3
+ addi $rp,$sp,$M
+ bl __ecp_nistz256_add # p256_add(M, Zsqr, in_x);
+
+ addi $bp,$ap_real,0
+ mr $acc0,$a0 # restore Zsqr
+ mr $acc1,$a1
+ mr $acc2,$a2
+ mr $acc3,$a3
+ ld $a0,$S+0($sp) # forward load for p256_sqr_mont
+ ld $a1,$S+8($sp)
+ ld $a2,$S+16($sp)
+ ld $a3,$S+24($sp)
+ addi $rp,$sp,$Zsqr
+ bl __ecp_nistz256_sub_morf # p256_sub(Zsqr, in_x, Zsqr);
+
+ addi $rp,$sp,$S
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(S, S);
+
+ ld $bi,32($ap_real)
+ ld $a0,64($ap_real)
+ ld $a1,72($ap_real)
+ ld $a2,80($ap_real)
+ ld $a3,88($ap_real)
+ addi $bp,$ap_real,32
+ addi $rp,$sp,$tmp0
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(tmp0, in_z, in_y);
+
+ mr $t0,$acc0
+ mr $t1,$acc1
+ mr $t2,$acc2
+ mr $t3,$acc3
+ ld $a0,$S+0($sp) # forward load for p256_sqr_mont
+ ld $a1,$S+8($sp)
+ ld $a2,$S+16($sp)
+ ld $a3,$S+24($sp)
+ addi $rp,$rp_real,64
+ bl __ecp_nistz256_add # p256_mul_by_2(res_z, tmp0);
+
+ addi $rp,$sp,$tmp0
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(tmp0, S);
+
+ ld $bi,$Zsqr($sp) # forward load for p256_mul_mont
+ ld $a0,$M+0($sp)
+ ld $a1,$M+8($sp)
+ ld $a2,$M+16($sp)
+ ld $a3,$M+24($sp)
+ addi $rp,$rp_real,32
+ bl __ecp_nistz256_div_by_2 # p256_div_by_2(res_y, tmp0);
+
+ addi $bp,$sp,$Zsqr
+ addi $rp,$sp,$M
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(M, M, Zsqr);
+
+ mr $t0,$acc0 # duplicate M
+ mr $t1,$acc1
+ mr $t2,$acc2
+ mr $t3,$acc3
+ mr $a0,$acc0 # put M aside
+ mr $a1,$acc1
+ mr $a2,$acc2
+ mr $a3,$acc3
+ addi $rp,$sp,$M
+ bl __ecp_nistz256_add
+ mr $t0,$a0 # restore M
+ mr $t1,$a1
+ mr $t2,$a2
+ mr $t3,$a3
+ ld $bi,0($ap_real) # forward load for p256_mul_mont
+ ld $a0,$S+0($sp)
+ ld $a1,$S+8($sp)
+ ld $a2,$S+16($sp)
+ ld $a3,$S+24($sp)
+ bl __ecp_nistz256_add # p256_mul_by_3(M, M);
+
+ addi $bp,$ap_real,0
+ addi $rp,$sp,$S
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S, S, in_x);
+
+ mr $t0,$acc0
+ mr $t1,$acc1
+ mr $t2,$acc2
+ mr $t3,$acc3
+ ld $a0,$M+0($sp) # forward load for p256_sqr_mont
+ ld $a1,$M+8($sp)
+ ld $a2,$M+16($sp)
+ ld $a3,$M+24($sp)
+ addi $rp,$sp,$tmp0
+ bl __ecp_nistz256_add # p256_mul_by_2(tmp0, S);
+
+ addi $rp,$rp_real,0
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(res_x, M);
+
+ addi $bp,$sp,$tmp0
+ bl __ecp_nistz256_sub_from # p256_sub(res_x, res_x, tmp0);
+
+ addi $bp,$sp,$S
+ addi $rp,$sp,$S
+ bl __ecp_nistz256_sub_morf # p256_sub(S, S, res_x);
+
+ ld $bi,$M($sp)
+ mr $a0,$acc0 # copy S
+ mr $a1,$acc1
+ mr $a2,$acc2
+ mr $a3,$acc3
+ addi $bp,$sp,$M
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S, S, M);
+
+ addi $bp,$rp_real,32
+ addi $rp,$rp_real,32
+ bl __ecp_nistz256_sub_from # p256_sub(res_y, S, res_y);
+
+ mtlr r0
+ ld r20,$FRAME-8*12($sp)
+ ld r21,$FRAME-8*11($sp)
+ ld r22,$FRAME-8*10($sp)
+ ld r23,$FRAME-8*9($sp)
+ ld r24,$FRAME-8*8($sp)
+ ld r25,$FRAME-8*7($sp)
+ ld r26,$FRAME-8*6($sp)
+ ld r27,$FRAME-8*5($sp)
+ ld r28,$FRAME-8*4($sp)
+ ld r29,$FRAME-8*3($sp)
+ ld r30,$FRAME-8*2($sp)
+ ld r31,$FRAME-8*1($sp)
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,12,2,0
+ .long 0
+.size ecp_nistz256_point_double,.-ecp_nistz256_point_double
+___
+}
+
+########################################################################
+# void ecp_nistz256_point_add(P256_POINT *out,const P256_POINT *in1,
+# const P256_POINT *in2);
+if (1) {
+my $FRAME = 64 + 32*12 + 16*8;
+my ($res_x,$res_y,$res_z,
+ $H,$Hsqr,$R,$Rsqr,$Hcub,
+ $U1,$U2,$S1,$S2)=map(64+32*$_,(0..11));
+my ($Z1sqr, $Z2sqr) = ($Hsqr, $Rsqr);
+# above map() describes stack layout with 12 temporary
+# 256-bit vectors on top.
+my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("r$_",(16..21));
+
+$code.=<<___;
+.globl ecp_nistz256_point_add
+.align 5
+ecp_nistz256_point_add:
+ stdu $sp,-$FRAME($sp)
+ mflr r0
+ std r16,$FRAME-8*16($sp)
+ std r17,$FRAME-8*15($sp)
+ std r18,$FRAME-8*14($sp)
+ std r19,$FRAME-8*13($sp)
+ std r20,$FRAME-8*12($sp)
+ std r21,$FRAME-8*11($sp)
+ std r22,$FRAME-8*10($sp)
+ std r23,$FRAME-8*9($sp)
+ std r24,$FRAME-8*8($sp)
+ std r25,$FRAME-8*7($sp)
+ std r26,$FRAME-8*6($sp)
+ std r27,$FRAME-8*5($sp)
+ std r28,$FRAME-8*4($sp)
+ std r29,$FRAME-8*3($sp)
+ std r30,$FRAME-8*2($sp)
+ std r31,$FRAME-8*1($sp)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ ld $a0,64($bp) # in2_z
+ ld $a1,72($bp)
+ ld $a2,80($bp)
+ ld $a3,88($bp)
+ mr $rp_real,$rp
+ mr $ap_real,$ap
+ mr $bp_real,$bp
+ or $t0,$a0,$a1
+ or $t2,$a2,$a3
+ or $in2infty,$t0,$t2
+ neg $t0,$in2infty
+ or $in2infty,$in2infty,$t0
+ sradi $in2infty,$in2infty,63 # !in2infty
+ addi $rp,$sp,$Z2sqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Z2sqr, in2_z);
+
+ ld $a0,64($ap_real) # in1_z
+ ld $a1,72($ap_real)
+ ld $a2,80($ap_real)
+ ld $a3,88($ap_real)
+ or $t0,$a0,$a1
+ or $t2,$a2,$a3
+ or $in1infty,$t0,$t2
+ neg $t0,$in1infty
+ or $in1infty,$in1infty,$t0
+ sradi $in1infty,$in1infty,63 # !in1infty
+ addi $rp,$sp,$Z1sqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Z1sqr, in1_z);
+
+ ld $bi,64($bp_real)
+ ld $a0,$Z2sqr+0($sp)
+ ld $a1,$Z2sqr+8($sp)
+ ld $a2,$Z2sqr+16($sp)
+ ld $a3,$Z2sqr+24($sp)
+ addi $bp,$bp_real,64
+ addi $rp,$sp,$S1
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S1, Z2sqr, in2_z);
+
+ ld $bi,64($ap_real)
+ ld $a0,$Z1sqr+0($sp)
+ ld $a1,$Z1sqr+8($sp)
+ ld $a2,$Z1sqr+16($sp)
+ ld $a3,$Z1sqr+24($sp)
+ addi $bp,$ap_real,64
+ addi $rp,$sp,$S2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, Z1sqr, in1_z);
+
+ ld $bi,32($ap_real)
+ ld $a0,$S1+0($sp)
+ ld $a1,$S1+8($sp)
+ ld $a2,$S1+16($sp)
+ ld $a3,$S1+24($sp)
+ addi $bp,$ap_real,32
+ addi $rp,$sp,$S1
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S1, S1, in1_y);
+
+ ld $bi,32($bp_real)
+ ld $a0,$S2+0($sp)
+ ld $a1,$S2+8($sp)
+ ld $a2,$S2+16($sp)
+ ld $a3,$S2+24($sp)
+ addi $bp,$bp_real,32
+ addi $rp,$sp,$S2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, S2, in2_y);
+
+ addi $bp,$sp,$S1
+ ld $bi,$Z2sqr($sp) # forward load for p256_mul_mont
+ ld $a0,0($ap_real)
+ ld $a1,8($ap_real)
+ ld $a2,16($ap_real)
+ ld $a3,24($ap_real)
+ addi $rp,$sp,$R
+ bl __ecp_nistz256_sub_from # p256_sub(R, S2, S1);
+
+ or $acc0,$acc0,$acc1 # see if result is zero
+ or $acc2,$acc2,$acc3
+ or $temp,$acc0,$acc2
+
+ addi $bp,$sp,$Z2sqr
+ addi $rp,$sp,$U1
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(U1, in1_x, Z2sqr);
+
+ ld $bi,$Z1sqr($sp)
+ ld $a0,0($bp_real)
+ ld $a1,8($bp_real)
+ ld $a2,16($bp_real)
+ ld $a3,24($bp_real)
+ addi $bp,$sp,$Z1sqr
+ addi $rp,$sp,$U2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, in2_x, Z1sqr);
+
+ addi $bp,$sp,$U1
+ ld $a0,$R+0($sp) # forward load for p256_sqr_mont
+ ld $a1,$R+8($sp)
+ ld $a2,$R+16($sp)
+ ld $a3,$R+24($sp)
+ addi $rp,$sp,$H
+ bl __ecp_nistz256_sub_from # p256_sub(H, U2, U1);
+
+ or $acc0,$acc0,$acc1 # see if result is zero
+ or $acc2,$acc2,$acc3
+ or. $acc0,$acc0,$acc2
+ bne .Ladd_proceed # is_equal(U1,U2)?
+
+ and. $t0,$in1infty,$in2infty
+ beq .Ladd_proceed # (in1infty || in2infty)?
+
+ cmpldi $temp,0
+ beq .Ladd_double # is_equal(S1,S2)?
+
+ xor $a0,$a0,$a0
+ std $a0,0($rp_real)
+ std $a0,8($rp_real)
+ std $a0,16($rp_real)
+ std $a0,24($rp_real)
+ std $a0,32($rp_real)
+ std $a0,40($rp_real)
+ std $a0,48($rp_real)
+ std $a0,56($rp_real)
+ std $a0,64($rp_real)
+ std $a0,72($rp_real)
+ std $a0,80($rp_real)
+ std $a0,88($rp_real)
+ b .Ladd_done
+
+.align 4
+.Ladd_double:
+ ld $bp,0($sp) # back-link
+ mr $ap,$ap_real
+ mr $rp,$rp_real
+ ld r16,$FRAME-8*16($sp)
+ ld r17,$FRAME-8*15($sp)
+ ld r18,$FRAME-8*14($sp)
+ ld r19,$FRAME-8*13($sp)
+ stdu $bp,$FRAME-288($sp) # difference in stack frame sizes
+ b .Ldouble_shortcut
+
+.align 4
+.Ladd_proceed:
+ addi $rp,$sp,$Rsqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Rsqr, R);
+
+ ld $bi,64($ap_real)
+ ld $a0,$H+0($sp)
+ ld $a1,$H+8($sp)
+ ld $a2,$H+16($sp)
+ ld $a3,$H+24($sp)
+ addi $bp,$ap_real,64
+ addi $rp,$sp,$res_z
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(res_z, H, in1_z);
+
+ ld $a0,$H+0($sp)
+ ld $a1,$H+8($sp)
+ ld $a2,$H+16($sp)
+ ld $a3,$H+24($sp)
+ addi $rp,$sp,$Hsqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Hsqr, H);
+
+ ld $bi,64($bp_real)
+ ld $a0,$res_z+0($sp)
+ ld $a1,$res_z+8($sp)
+ ld $a2,$res_z+16($sp)
+ ld $a3,$res_z+24($sp)
+ addi $bp,$bp_real,64
+ addi $rp,$sp,$res_z
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(res_z, res_z, in2_z);
+
+ ld $bi,$H($sp)
+ ld $a0,$Hsqr+0($sp)
+ ld $a1,$Hsqr+8($sp)
+ ld $a2,$Hsqr+16($sp)
+ ld $a3,$Hsqr+24($sp)
+ addi $bp,$sp,$H
+ addi $rp,$sp,$Hcub
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(Hcub, Hsqr, H);
+
+ ld $bi,$Hsqr($sp)
+ ld $a0,$U1+0($sp)
+ ld $a1,$U1+8($sp)
+ ld $a2,$U1+16($sp)
+ ld $a3,$U1+24($sp)
+ addi $bp,$sp,$Hsqr
+ addi $rp,$sp,$U2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, U1, Hsqr);
+
+ mr $t0,$acc0
+ mr $t1,$acc1
+ mr $t2,$acc2
+ mr $t3,$acc3
+ addi $rp,$sp,$Hsqr
+ bl __ecp_nistz256_add # p256_mul_by_2(Hsqr, U2);
+
+ addi $bp,$sp,$Rsqr
+ addi $rp,$sp,$res_x
+ bl __ecp_nistz256_sub_morf # p256_sub(res_x, Rsqr, Hsqr);
+
+ addi $bp,$sp,$Hcub
+ bl __ecp_nistz256_sub_from # p256_sub(res_x, res_x, Hcub);
+
+ addi $bp,$sp,$U2
+ ld $bi,$Hcub($sp) # forward load for p256_mul_mont
+ ld $a0,$S1+0($sp)
+ ld $a1,$S1+8($sp)
+ ld $a2,$S1+16($sp)
+ ld $a3,$S1+24($sp)
+ addi $rp,$sp,$res_y
+ bl __ecp_nistz256_sub_morf # p256_sub(res_y, U2, res_x);
+
+ addi $bp,$sp,$Hcub
+ addi $rp,$sp,$S2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, S1, Hcub);
+
+ ld $bi,$R($sp)
+ ld $a0,$res_y+0($sp)
+ ld $a1,$res_y+8($sp)
+ ld $a2,$res_y+16($sp)
+ ld $a3,$res_y+24($sp)
+ addi $bp,$sp,$R
+ addi $rp,$sp,$res_y
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(res_y, res_y, R);
+
+ addi $bp,$sp,$S2
+ bl __ecp_nistz256_sub_from # p256_sub(res_y, res_y, S2);
+
+ ld $t0,0($bp_real) # in2
+ ld $t1,8($bp_real)
+ ld $t2,16($bp_real)
+ ld $t3,24($bp_real)
+ ld $a0,$res_x+0($sp) # res
+ ld $a1,$res_x+8($sp)
+ ld $a2,$res_x+16($sp)
+ ld $a3,$res_x+24($sp)
+___
+for($i=0;$i<64;$i+=32) { # conditional moves
+$code.=<<___;
+ ld $acc0,$i+0($ap_real) # in1
+ ld $acc1,$i+8($ap_real)
+ ld $acc2,$i+16($ap_real)
+ ld $acc3,$i+24($ap_real)
+ andc $t0,$t0,$in1infty
+ andc $t1,$t1,$in1infty
+ andc $t2,$t2,$in1infty
+ andc $t3,$t3,$in1infty
+ and $a0,$a0,$in1infty
+ and $a1,$a1,$in1infty
+ and $a2,$a2,$in1infty
+ and $a3,$a3,$in1infty
+ or $t0,$t0,$a0
+ or $t1,$t1,$a1
+ or $t2,$t2,$a2
+ or $t3,$t3,$a3
+ andc $acc0,$acc0,$in2infty
+ andc $acc1,$acc1,$in2infty
+ andc $acc2,$acc2,$in2infty
+ andc $acc3,$acc3,$in2infty
+ and $t0,$t0,$in2infty
+ and $t1,$t1,$in2infty
+ and $t2,$t2,$in2infty
+ and $t3,$t3,$in2infty
+ or $acc0,$acc0,$t0
+ or $acc1,$acc1,$t1
+ or $acc2,$acc2,$t2
+ or $acc3,$acc3,$t3
+
+ ld $t0,$i+32($bp_real) # in2
+ ld $t1,$i+40($bp_real)
+ ld $t2,$i+48($bp_real)
+ ld $t3,$i+56($bp_real)
+ ld $a0,$res_x+$i+32($sp)
+ ld $a1,$res_x+$i+40($sp)
+ ld $a2,$res_x+$i+48($sp)
+ ld $a3,$res_x+$i+56($sp)
+ std $acc0,$i+0($rp_real)
+ std $acc1,$i+8($rp_real)
+ std $acc2,$i+16($rp_real)
+ std $acc3,$i+24($rp_real)
+___
+}
+$code.=<<___;
+ ld $acc0,$i+0($ap_real) # in1
+ ld $acc1,$i+8($ap_real)
+ ld $acc2,$i+16($ap_real)
+ ld $acc3,$i+24($ap_real)
+ andc $t0,$t0,$in1infty
+ andc $t1,$t1,$in1infty
+ andc $t2,$t2,$in1infty
+ andc $t3,$t3,$in1infty
+ and $a0,$a0,$in1infty
+ and $a1,$a1,$in1infty
+ and $a2,$a2,$in1infty
+ and $a3,$a3,$in1infty
+ or $t0,$t0,$a0
+ or $t1,$t1,$a1
+ or $t2,$t2,$a2
+ or $t3,$t3,$a3
+ andc $acc0,$acc0,$in2infty
+ andc $acc1,$acc1,$in2infty
+ andc $acc2,$acc2,$in2infty
+ andc $acc3,$acc3,$in2infty
+ and $t0,$t0,$in2infty
+ and $t1,$t1,$in2infty
+ and $t2,$t2,$in2infty
+ and $t3,$t3,$in2infty
+ or $acc0,$acc0,$t0
+ or $acc1,$acc1,$t1
+ or $acc2,$acc2,$t2
+ or $acc3,$acc3,$t3
+ std $acc0,$i+0($rp_real)
+ std $acc1,$i+8($rp_real)
+ std $acc2,$i+16($rp_real)
+ std $acc3,$i+24($rp_real)
+
+.Ladd_done:
+ mtlr r0
+ ld r16,$FRAME-8*16($sp)
+ ld r17,$FRAME-8*15($sp)
+ ld r18,$FRAME-8*14($sp)
+ ld r19,$FRAME-8*13($sp)
+ ld r20,$FRAME-8*12($sp)
+ ld r21,$FRAME-8*11($sp)
+ ld r22,$FRAME-8*10($sp)
+ ld r23,$FRAME-8*9($sp)
+ ld r24,$FRAME-8*8($sp)
+ ld r25,$FRAME-8*7($sp)
+ ld r26,$FRAME-8*6($sp)
+ ld r27,$FRAME-8*5($sp)
+ ld r28,$FRAME-8*4($sp)
+ ld r29,$FRAME-8*3($sp)
+ ld r30,$FRAME-8*2($sp)
+ ld r31,$FRAME-8*1($sp)
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,16,3,0
+ .long 0
+.size ecp_nistz256_point_add,.-ecp_nistz256_point_add
+___
+}
+
+########################################################################
+# void ecp_nistz256_point_add_affine(P256_POINT *out,const P256_POINT *in1,
+# const P256_POINT_AFFINE *in2);
+if (1) {
+my $FRAME = 64 + 32*10 + 16*8;
+my ($res_x,$res_y,$res_z,
+ $U2,$S2,$H,$R,$Hsqr,$Hcub,$Rsqr)=map(64+32*$_,(0..9));
+my $Z1sqr = $S2;
+# above map() describes stack layout with 10 temporary
+# 256-bit vectors on top.
+my ($rp_real,$ap_real,$bp_real,$in1infty,$in2infty,$temp)=map("r$_",(16..21));
+
+$code.=<<___;
+.globl ecp_nistz256_point_add_affine
+.align 5
+ecp_nistz256_point_add_affine:
+ stdu $sp,-$FRAME($sp)
+ mflr r0
+ std r16,$FRAME-8*16($sp)
+ std r17,$FRAME-8*15($sp)
+ std r18,$FRAME-8*14($sp)
+ std r19,$FRAME-8*13($sp)
+ std r20,$FRAME-8*12($sp)
+ std r21,$FRAME-8*11($sp)
+ std r22,$FRAME-8*10($sp)
+ std r23,$FRAME-8*9($sp)
+ std r24,$FRAME-8*8($sp)
+ std r25,$FRAME-8*7($sp)
+ std r26,$FRAME-8*6($sp)
+ std r27,$FRAME-8*5($sp)
+ std r28,$FRAME-8*4($sp)
+ std r29,$FRAME-8*3($sp)
+ std r30,$FRAME-8*2($sp)
+ std r31,$FRAME-8*1($sp)
+
+ li $poly1,-1
+ srdi $poly1,$poly1,32 # 0x00000000ffffffff
+ li $poly3,1
+ orc $poly3,$poly3,$poly1 # 0xffffffff00000001
+
+ mr $rp_real,$rp
+ mr $ap_real,$ap
+ mr $bp_real,$bp
+
+ ld $a0,64($ap) # in1_z
+ ld $a1,72($ap)
+ ld $a2,80($ap)
+ ld $a3,88($ap)
+ or $t0,$a0,$a1
+ or $t2,$a2,$a3
+ or $in1infty,$t0,$t2
+ neg $t0,$in1infty
+ or $in1infty,$in1infty,$t0
+ sradi $in1infty,$in1infty,63 # !in1infty
+
+ ld $acc0,0($bp) # in2_x
+ ld $acc1,8($bp)
+ ld $acc2,16($bp)
+ ld $acc3,24($bp)
+ ld $t0,32($bp) # in2_y
+ ld $t1,40($bp)
+ ld $t2,48($bp)
+ ld $t3,56($bp)
+ or $acc0,$acc0,$acc1
+ or $acc2,$acc2,$acc3
+ or $acc0,$acc0,$acc2
+ or $t0,$t0,$t1
+ or $t2,$t2,$t3
+ or $t0,$t0,$t2
+ or $in2infty,$acc0,$t0
+ neg $t0,$in2infty
+ or $in2infty,$in2infty,$t0
+ sradi $in2infty,$in2infty,63 # !in2infty
+
+ addi $rp,$sp,$Z1sqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Z1sqr, in1_z);
+
+ mr $a0,$acc0
+ mr $a1,$acc1
+ mr $a2,$acc2
+ mr $a3,$acc3
+ ld $bi,0($bp_real)
+ addi $bp,$bp_real,0
+ addi $rp,$sp,$U2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, Z1sqr, in2_x);
+
+ addi $bp,$ap_real,0
+ ld $bi,64($ap_real) # forward load for p256_mul_mont
+ ld $a0,$Z1sqr+0($sp)
+ ld $a1,$Z1sqr+8($sp)
+ ld $a2,$Z1sqr+16($sp)
+ ld $a3,$Z1sqr+24($sp)
+ addi $rp,$sp,$H
+ bl __ecp_nistz256_sub_from # p256_sub(H, U2, in1_x);
+
+ addi $bp,$ap_real,64
+ addi $rp,$sp,$S2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, Z1sqr, in1_z);
+
+ ld $bi,64($ap_real)
+ ld $a0,$H+0($sp)
+ ld $a1,$H+8($sp)
+ ld $a2,$H+16($sp)
+ ld $a3,$H+24($sp)
+ addi $bp,$ap_real,64
+ addi $rp,$sp,$res_z
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(res_z, H, in1_z);
+
+ ld $bi,32($bp_real)
+ ld $a0,$S2+0($sp)
+ ld $a1,$S2+8($sp)
+ ld $a2,$S2+16($sp)
+ ld $a3,$S2+24($sp)
+ addi $bp,$bp_real,32
+ addi $rp,$sp,$S2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, S2, in2_y);
+
+ addi $bp,$ap_real,32
+ ld $a0,$H+0($sp) # forward load for p256_sqr_mont
+ ld $a1,$H+8($sp)
+ ld $a2,$H+16($sp)
+ ld $a3,$H+24($sp)
+ addi $rp,$sp,$R
+ bl __ecp_nistz256_sub_from # p256_sub(R, S2, in1_y);
+
+ addi $rp,$sp,$Hsqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Hsqr, H);
+
+ ld $a0,$R+0($sp)
+ ld $a1,$R+8($sp)
+ ld $a2,$R+16($sp)
+ ld $a3,$R+24($sp)
+ addi $rp,$sp,$Rsqr
+ bl __ecp_nistz256_sqr_mont # p256_sqr_mont(Rsqr, R);
+
+ ld $bi,$H($sp)
+ ld $a0,$Hsqr+0($sp)
+ ld $a1,$Hsqr+8($sp)
+ ld $a2,$Hsqr+16($sp)
+ ld $a3,$Hsqr+24($sp)
+ addi $bp,$sp,$H
+ addi $rp,$sp,$Hcub
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(Hcub, Hsqr, H);
+
+ ld $bi,0($ap_real)
+ ld $a0,$Hsqr+0($sp)
+ ld $a1,$Hsqr+8($sp)
+ ld $a2,$Hsqr+16($sp)
+ ld $a3,$Hsqr+24($sp)
+ addi $bp,$ap_real,0
+ addi $rp,$sp,$U2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(U2, in1_x, Hsqr);
+
+ mr $t0,$acc0
+ mr $t1,$acc1
+ mr $t2,$acc2
+ mr $t3,$acc3
+ addi $rp,$sp,$Hsqr
+ bl __ecp_nistz256_add # p256_mul_by_2(Hsqr, U2);
+
+ addi $bp,$sp,$Rsqr
+ addi $rp,$sp,$res_x
+ bl __ecp_nistz256_sub_morf # p256_sub(res_x, Rsqr, Hsqr);
+
+ addi $bp,$sp,$Hcub
+ bl __ecp_nistz256_sub_from # p256_sub(res_x, res_x, Hcub);
+
+ addi $bp,$sp,$U2
+ ld $bi,32($ap_real) # forward load for p256_mul_mont
+ ld $a0,$Hcub+0($sp)
+ ld $a1,$Hcub+8($sp)
+ ld $a2,$Hcub+16($sp)
+ ld $a3,$Hcub+24($sp)
+ addi $rp,$sp,$res_y
+ bl __ecp_nistz256_sub_morf # p256_sub(res_y, U2, res_x);
+
+ addi $bp,$ap_real,32
+ addi $rp,$sp,$S2
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(S2, in1_y, Hcub);
+
+ ld $bi,$R($sp)
+ ld $a0,$res_y+0($sp)
+ ld $a1,$res_y+8($sp)
+ ld $a2,$res_y+16($sp)
+ ld $a3,$res_y+24($sp)
+ addi $bp,$sp,$R
+ addi $rp,$sp,$res_y
+ bl __ecp_nistz256_mul_mont # p256_mul_mont(res_y, res_y, R);
+
+ addi $bp,$sp,$S2
+ bl __ecp_nistz256_sub_from # p256_sub(res_y, res_y, S2);
+
+ ld $t0,0($bp_real) # in2
+ ld $t1,8($bp_real)
+ ld $t2,16($bp_real)
+ ld $t3,24($bp_real)
+ ld $a0,$res_x+0($sp) # res
+ ld $a1,$res_x+8($sp)
+ ld $a2,$res_x+16($sp)
+ ld $a3,$res_x+24($sp)
+___
+for($i=0;$i<64;$i+=32) { # conditional moves
+$code.=<<___;
+ ld $acc0,$i+0($ap_real) # in1
+ ld $acc1,$i+8($ap_real)
+ ld $acc2,$i+16($ap_real)
+ ld $acc3,$i+24($ap_real)
+ andc $t0,$t0,$in1infty
+ andc $t1,$t1,$in1infty
+ andc $t2,$t2,$in1infty
+ andc $t3,$t3,$in1infty
+ and $a0,$a0,$in1infty
+ and $a1,$a1,$in1infty
+ and $a2,$a2,$in1infty
+ and $a3,$a3,$in1infty
+ or $t0,$t0,$a0
+ or $t1,$t1,$a1
+ or $t2,$t2,$a2
+ or $t3,$t3,$a3
+ andc $acc0,$acc0,$in2infty
+ andc $acc1,$acc1,$in2infty
+ andc $acc2,$acc2,$in2infty
+ andc $acc3,$acc3,$in2infty
+ and $t0,$t0,$in2infty
+ and $t1,$t1,$in2infty
+ and $t2,$t2,$in2infty
+ and $t3,$t3,$in2infty
+ or $acc0,$acc0,$t0
+ or $acc1,$acc1,$t1
+ or $acc2,$acc2,$t2
+ or $acc3,$acc3,$t3
+___
+$code.=<<___ if ($i==0);
+ ld $t0,32($bp_real) # in2
+ ld $t1,40($bp_real)
+ ld $t2,48($bp_real)
+ ld $t3,56($bp_real)
+___
+$code.=<<___ if ($i==32);
+ li $t0,1 # Lone_mont
+ not $t1,$poly1
+ li $t2,-1
+ not $t3,$poly3
+___
+$code.=<<___;
+ ld $a0,$res_x+$i+32($sp)
+ ld $a1,$res_x+$i+40($sp)
+ ld $a2,$res_x+$i+48($sp)
+ ld $a3,$res_x+$i+56($sp)
+ std $acc0,$i+0($rp_real)
+ std $acc1,$i+8($rp_real)
+ std $acc2,$i+16($rp_real)
+ std $acc3,$i+24($rp_real)
+___
+}
+$code.=<<___;
+ ld $acc0,$i+0($ap_real) # in1
+ ld $acc1,$i+8($ap_real)
+ ld $acc2,$i+16($ap_real)
+ ld $acc3,$i+24($ap_real)
+ andc $t0,$t0,$in1infty
+ andc $t1,$t1,$in1infty
+ andc $t2,$t2,$in1infty
+ andc $t3,$t3,$in1infty
+ and $a0,$a0,$in1infty
+ and $a1,$a1,$in1infty
+ and $a2,$a2,$in1infty
+ and $a3,$a3,$in1infty
+ or $t0,$t0,$a0
+ or $t1,$t1,$a1
+ or $t2,$t2,$a2
+ or $t3,$t3,$a3
+ andc $acc0,$acc0,$in2infty
+ andc $acc1,$acc1,$in2infty
+ andc $acc2,$acc2,$in2infty
+ andc $acc3,$acc3,$in2infty
+ and $t0,$t0,$in2infty
+ and $t1,$t1,$in2infty
+ and $t2,$t2,$in2infty
+ and $t3,$t3,$in2infty
+ or $acc0,$acc0,$t0
+ or $acc1,$acc1,$t1
+ or $acc2,$acc2,$t2
+ or $acc3,$acc3,$t3
+ std $acc0,$i+0($rp_real)
+ std $acc1,$i+8($rp_real)
+ std $acc2,$i+16($rp_real)
+ std $acc3,$i+24($rp_real)
+
+ mtlr r0
+ ld r16,$FRAME-8*16($sp)
+ ld r17,$FRAME-8*15($sp)
+ ld r18,$FRAME-8*14($sp)
+ ld r19,$FRAME-8*13($sp)
+ ld r20,$FRAME-8*12($sp)
+ ld r21,$FRAME-8*11($sp)
+ ld r22,$FRAME-8*10($sp)
+ ld r23,$FRAME-8*9($sp)
+ ld r24,$FRAME-8*8($sp)
+ ld r25,$FRAME-8*7($sp)
+ ld r26,$FRAME-8*6($sp)
+ ld r27,$FRAME-8*5($sp)
+ ld r28,$FRAME-8*4($sp)
+ ld r29,$FRAME-8*3($sp)
+ ld r30,$FRAME-8*2($sp)
+ ld r31,$FRAME-8*1($sp)
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,16,3,0
+ .long 0
+.size ecp_nistz256_point_add_affine,.-ecp_nistz256_point_add_affine
+___
+}
+if (1) {
+my ($ordk,$ord0,$ord1,$t4) = map("r$_",(18..21));
+my ($ord2,$ord3,$zr) = ($poly1,$poly3,"r0");
+
+$code.=<<___;
+########################################################################
+# void ecp_nistz256_ord_mul_mont(uint64_t res[4], uint64_t a[4],
+# uint64_t b[4]);
+.globl ecp_nistz256_ord_mul_mont
+.align 5
+ecp_nistz256_ord_mul_mont:
+ stdu $sp,-160($sp)
+ std r18,48($sp)
+ std r19,56($sp)
+ std r20,64($sp)
+ std r21,72($sp)
+ std r22,80($sp)
+ std r23,88($sp)
+ std r24,96($sp)
+ std r25,104($sp)
+ std r26,112($sp)
+ std r27,120($sp)
+ std r28,128($sp)
+ std r29,136($sp)
+ std r30,144($sp)
+ std r31,152($sp)
+
+ ld $a0,0($ap)
+ ld $bi,0($bp)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+
+ lis $ordk,0xccd1
+ lis $ord0,0xf3b9
+ lis $ord1,0xbce6
+ ori $ordk,$ordk,0xc8aa
+ ori $ord0,$ord0,0xcac2
+ ori $ord1,$ord1,0xfaad
+ sldi $ordk,$ordk,32
+ sldi $ord0,$ord0,32
+ sldi $ord1,$ord1,32
+ oris $ordk,$ordk,0xee00
+ oris $ord0,$ord0,0xfc63
+ oris $ord1,$ord1,0xa717
+ ori $ordk,$ordk,0xbc4f # 0xccd1c8aaee00bc4f
+ ori $ord0,$ord0,0x2551 # 0xf3b9cac2fc632551
+ ori $ord1,$ord1,0x9e84 # 0xbce6faada7179e84
+ li $ord2,-1 # 0xffffffffffffffff
+ sldi $ord3,$ord2,32 # 0xffffffff00000000
+ li $zr,0
+
+ mulld $acc0,$a0,$bi # a[0]*b[0]
+ mulhdu $t0,$a0,$bi
+
+ mulld $acc1,$a1,$bi # a[1]*b[0]
+ mulhdu $t1,$a1,$bi
+
+ mulld $acc2,$a2,$bi # a[2]*b[0]
+ mulhdu $t2,$a2,$bi
+
+ mulld $acc3,$a3,$bi # a[3]*b[0]
+ mulhdu $acc4,$a3,$bi
+
+ mulld $t4,$acc0,$ordk
+
+ addc $acc1,$acc1,$t0 # accumulate high parts of multiplication
+ adde $acc2,$acc2,$t1
+ adde $acc3,$acc3,$t2
+ addze $acc4,$acc4
+ li $acc5,0
+___
+for ($i=1;$i<4;$i++) {
+ ################################################################
+ # ffff0000.ffffffff.yyyyyyyy.zzzzzzzz
+ # * abcdefgh
+ # + xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
+ #
+ # Now observing that ff..ff*x = (2^n-1)*x = 2^n*x-x, we
+ # rewrite above as:
+ #
+ # xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx.xxxxxxxx
+ # - 0000abcd.efgh0000.abcdefgh.00000000.00000000
+ # + abcdefgh.abcdefgh.yzayzbyz.cyzdyzey.zfyzgyzh
+$code.=<<___;
+ ld $bi,8*$i($bp) # b[i]
+
+ sldi $t0,$t4,32
+ subfc $acc2,$t4,$acc2
+ srdi $t1,$t4,32
+ subfe $acc3,$t0,$acc3
+ subfe $acc4,$t1,$acc4
+ subfe $acc5,$zr,$acc5
+
+ addic $t0,$acc0,-1 # discarded
+ mulhdu $t1,$ord0,$t4
+ mulld $t2,$ord1,$t4
+ mulhdu $t3,$ord1,$t4
+
+ adde $t2,$t2,$t1
+ mulld $t0,$a0,$bi
+ addze $t3,$t3
+ mulld $t1,$a1,$bi
+
+ addc $acc0,$acc1,$t2
+ mulld $t2,$a2,$bi
+ adde $acc1,$acc2,$t3
+ mulld $t3,$a3,$bi
+ adde $acc2,$acc3,$t4
+ adde $acc3,$acc4,$t4
+ addze $acc4,$acc5
+
+ addc $acc0,$acc0,$t0 # accumulate low parts
+ mulhdu $t0,$a0,$bi
+ adde $acc1,$acc1,$t1
+ mulhdu $t1,$a1,$bi
+ adde $acc2,$acc2,$t2
+ mulhdu $t2,$a2,$bi
+ adde $acc3,$acc3,$t3
+ mulhdu $t3,$a3,$bi
+ addze $acc4,$acc4
+ mulld $t4,$acc0,$ordk
+ addc $acc1,$acc1,$t0 # accumulate high parts
+ adde $acc2,$acc2,$t1
+ adde $acc3,$acc3,$t2
+ adde $acc4,$acc4,$t3
+ addze $acc5,$zr
+___
+}
+$code.=<<___;
+ sldi $t0,$t4,32 # last reduction
+ subfc $acc2,$t4,$acc2
+ srdi $t1,$t4,32
+ subfe $acc3,$t0,$acc3
+ subfe $acc4,$t1,$acc4
+ subfe $acc5,$zr,$acc5
+
+ addic $t0,$acc0,-1 # discarded
+ mulhdu $t1,$ord0,$t4
+ mulld $t2,$ord1,$t4
+ mulhdu $t3,$ord1,$t4
+
+ adde $t2,$t2,$t1
+ addze $t3,$t3
+
+ addc $acc0,$acc1,$t2
+ adde $acc1,$acc2,$t3
+ adde $acc2,$acc3,$t4
+ adde $acc3,$acc4,$t4
+ addze $acc4,$acc5
+
+ subfc $acc0,$ord0,$acc0 # ret -= modulus
+ subfe $acc1,$ord1,$acc1
+ subfe $acc2,$ord2,$acc2
+ subfe $acc3,$ord3,$acc3
+ subfe $acc4,$zr,$acc4
+
+ and $t0,$ord0,$acc4
+ and $t1,$ord1,$acc4
+ addc $acc0,$acc0,$t0 # ret += modulus if borrow
+ and $t3,$ord3,$acc4
+ adde $acc1,$acc1,$t1
+ adde $acc2,$acc2,$acc4
+ adde $acc3,$acc3,$t3
+
+ std $acc0,0($rp)
+ std $acc1,8($rp)
+ std $acc2,16($rp)
+ std $acc3,24($rp)
+
+ ld r18,48($sp)
+ ld r19,56($sp)
+ ld r20,64($sp)
+ ld r21,72($sp)
+ ld r22,80($sp)
+ ld r23,88($sp)
+ ld r24,96($sp)
+ ld r25,104($sp)
+ ld r26,112($sp)
+ ld r27,120($sp)
+ ld r28,128($sp)
+ ld r29,136($sp)
+ ld r30,144($sp)
+ ld r31,152($sp)
+ addi $sp,$sp,160
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,14,3,0
+ .long 0
+.size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
+
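The routine above is a word-by-word Montgomery multiplication modulo the P-256 group order, with .Lord holding the order and .LordK holding what should be -ord^(-1) mod 2^64, so that each reduction step makes the low limb vanish ("discarded" above). The following Python sketch is an illustrative reference model of that flow, not the generated assembly; the helper names and loop structure are mine.

    # Sketch under the assumption that .LordK == -ord^{-1} mod 2^64.
    ORD = 0xffffffff00000000ffffffffffffffffbce6faada7179e84f3b9cac2fc632551
    W, R = 1 << 64, 1 << 256                  # limb size, Montgomery radix
    ORD_K = (-pow(ORD, -1, W)) % W            # should match .LordK = 0xccd1c8aaee00bc4f

    def ord_mul_mont(a, b):
        """Return a*b*R^-1 mod ORD, interleaving one reduction per limb of b."""
        acc = 0
        for i in range(4):
            acc += a * ((b >> (64 * i)) & (W - 1))   # acc += a * b[i]
            m = (acc * ORD_K) % W                    # chosen so the low limb cancels
            acc = (acc + m * ORD) >> 64              # low limb is zero; drop it
        return acc - ORD if acc >= ORD else acc      # single conditional subtract

    # Round trip through Montgomery form.
    a, b = 0x1234, 0x5678
    aR, bR = a * R % ORD, b * R % ORD
    assert ord_mul_mont(ord_mul_mont(aR, bR), 1) == a * b % ORD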
+################################################################################
+# void ecp_nistz256_ord_sqr_mont(uint64_t res[4], uint64_t a[4],
+# int rep);
+.globl ecp_nistz256_ord_sqr_mont
+.align 5
+ecp_nistz256_ord_sqr_mont:
+ stdu $sp,-160($sp)
+ std r18,48($sp)
+ std r19,56($sp)
+ std r20,64($sp)
+ std r21,72($sp)
+ std r22,80($sp)
+ std r23,88($sp)
+ std r24,96($sp)
+ std r25,104($sp)
+ std r26,112($sp)
+ std r27,120($sp)
+ std r28,128($sp)
+ std r29,136($sp)
+ std r30,144($sp)
+ std r31,152($sp)
+
+ mtctr $bp
+
+ ld $a0,0($ap)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+
+ lis $ordk,0xccd1
+ lis $ord0,0xf3b9
+ lis $ord1,0xbce6
+ ori $ordk,$ordk,0xc8aa
+ ori $ord0,$ord0,0xcac2
+ ori $ord1,$ord1,0xfaad
+ sldi $ordk,$ordk,32
+ sldi $ord0,$ord0,32
+ sldi $ord1,$ord1,32
+ oris $ordk,$ordk,0xee00
+ oris $ord0,$ord0,0xfc63
+ oris $ord1,$ord1,0xa717
+ ori $ordk,$ordk,0xbc4f # 0xccd1c8aaee00bc4f
+ ori $ord0,$ord0,0x2551 # 0xf3b9cac2fc632551
+ ori $ord1,$ord1,0x9e84 # 0xbce6faada7179e84
+ li $ord2,-1 # 0xffffffffffffffff
+ sldi $ord3,$ord2,32 # 0xffffffff00000000
+ li $zr,0
+ b .Loop_ord_sqr
+
+.align 5
+.Loop_ord_sqr:
+ ################################################################
+ # | | | | | |a1*a0| |
+ # | | | | |a2*a0| | |
+ # | |a3*a2|a3*a0| | | |
+ # | | | |a2*a1| | | |
+ # | | |a3*a1| | | | |
+ # *| | | | | | | | 2|
+ # +|a3*a3|a2*a2|a1*a1|a0*a0|
+ # |--+--+--+--+--+--+--+--|
+ # |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
+ #
+ # "can't overflow" below mark carrying into high part of
+ # multiplication result, which can't overflow, because it
+ # can never be all ones.
+
+ mulld $acc1,$a1,$a0 # a[1]*a[0]
+ mulhdu $t1,$a1,$a0
+ mulld $acc2,$a2,$a0 # a[2]*a[0]
+ mulhdu $t2,$a2,$a0
+ mulld $acc3,$a3,$a0 # a[3]*a[0]
+ mulhdu $acc4,$a3,$a0
+
+ addc $acc2,$acc2,$t1 # accumulate high parts of multiplication
+ mulld $t0,$a2,$a1 # a[2]*a[1]
+ mulhdu $t1,$a2,$a1
+ adde $acc3,$acc3,$t2
+ mulld $t2,$a3,$a1 # a[3]*a[1]
+ mulhdu $t3,$a3,$a1
+ addze $acc4,$acc4 # can't overflow
+
+ mulld $acc5,$a3,$a2 # a[3]*a[2]
+ mulhdu $acc6,$a3,$a2
+
+ addc $t1,$t1,$t2 # accumulate high parts of multiplication
+ mulld $acc0,$a0,$a0 # a[0]*a[0]
+ addze $t2,$t3 # can't overflow
+
+ addc $acc3,$acc3,$t0 # accumulate low parts of multiplication
+ mulhdu $a0,$a0,$a0
+ adde $acc4,$acc4,$t1
+ mulld $t1,$a1,$a1 # a[1]*a[1]
+ adde $acc5,$acc5,$t2
+ mulhdu $a1,$a1,$a1
+ addze $acc6,$acc6 # can't overflow
+
+ addc $acc1,$acc1,$acc1 # acc[1-6]*=2
+ mulld $t2,$a2,$a2 # a[2]*a[2]
+ adde $acc2,$acc2,$acc2
+ mulhdu $a2,$a2,$a2
+ adde $acc3,$acc3,$acc3
+ mulld $t3,$a3,$a3 # a[3]*a[3]
+ adde $acc4,$acc4,$acc4
+ mulhdu $a3,$a3,$a3
+ adde $acc5,$acc5,$acc5
+ adde $acc6,$acc6,$acc6
+ addze $acc7,$zr
+
+ addc $acc1,$acc1,$a0 # +a[i]*a[i]
+ mulld $t4,$acc0,$ordk
+ adde $acc2,$acc2,$t1
+ adde $acc3,$acc3,$a1
+ adde $acc4,$acc4,$t2
+ adde $acc5,$acc5,$a2
+ adde $acc6,$acc6,$t3
+ adde $acc7,$acc7,$a3
+___
+for($i=0; $i<4; $i++) { # reductions
+$code.=<<___;
+ addic $t0,$acc0,-1 # discarded
+ mulhdu $t1,$ord0,$t4
+ mulld $t2,$ord1,$t4
+ mulhdu $t3,$ord1,$t4
+
+ adde $t2,$t2,$t1
+ addze $t3,$t3
+
+ addc $acc0,$acc1,$t2
+ adde $acc1,$acc2,$t3
+ adde $acc2,$acc3,$t4
+ adde $acc3,$zr,$t4 # can't overflow
+___
+$code.=<<___ if ($i<3);
+ mulld $t3,$acc0,$ordk
+___
+$code.=<<___;
+ sldi $t0,$t4,32
+ subfc $acc1,$t4,$acc1
+ srdi $t1,$t4,32
+ subfe $acc2,$t0,$acc2
+ subfe $acc3,$t1,$acc3 # can't borrow
+___
+ ($t3,$t4) = ($t4,$t3);
+}
+$code.=<<___;
+ addc $acc0,$acc0,$acc4 # accumulate upper half
+ adde $acc1,$acc1,$acc5
+ adde $acc2,$acc2,$acc6
+ adde $acc3,$acc3,$acc7
+ addze $acc4,$zr
+
+ subfc $acc0,$ord0,$acc0 # ret -= modulus
+ subfe $acc1,$ord1,$acc1
+ subfe $acc2,$ord2,$acc2
+ subfe $acc3,$ord3,$acc3
+ subfe $acc4,$zr,$acc4
+
+ and $t0,$ord0,$acc4
+ and $t1,$ord1,$acc4
+ addc $a0,$acc0,$t0 # ret += modulus if borrow
+ and $t3,$ord3,$acc4
+ adde $a1,$acc1,$t1
+ adde $a2,$acc2,$acc4
+ adde $a3,$acc3,$t3
+
+ bdnz .Loop_ord_sqr
+
+ std $a0,0($rp)
+ std $a1,8($rp)
+ std $a2,16($rp)
+ std $a3,24($rp)
+
+ ld r18,48($sp)
+ ld r19,56($sp)
+ ld r20,64($sp)
+ ld r21,72($sp)
+ ld r22,80($sp)
+ ld r23,88($sp)
+ ld r24,96($sp)
+ ld r25,104($sp)
+ ld r26,112($sp)
+ ld r27,120($sp)
+ ld r28,128($sp)
+ ld r29,136($sp)
+ ld r30,144($sp)
+ ld r31,152($sp)
+ addi $sp,$sp,160
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,14,3,0
+ .long 0
+.size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
+___
+} }
+
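ecp_nistz256_ord_sqr_mont above takes a repetition count rather than a second operand: it applies `rep` consecutive Montgomery squarings, which is the shape an addition-chain style inversion of the scalar wants. Continuing the Python model from the previous sketch (again illustrative; my reading of the `rep` argument, not a transcription of the callers):

    def ord_sqr_mont(aR, rep):
        for _ in range(rep):
            aR = ord_mul_mont(aR, aR)
        return aR

    # Three squarings of x in Montgomery form give x^(2^3) in Montgomery form.
    assert ord_sqr_mont(5 * R % ORD, 3) == pow(5, 8, ORD) * R % ORD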
+########################################################################
+# scatter-gather subroutines
+{
+my ($out,$inp,$index,$mask)=map("r$_",(3..7));
+$code.=<<___;
+########################################################################
+# void ecp_nistz256_scatter_w5(void *out, const P256_POINT *inp,
+# int index);
+.globl ecp_nistz256_scatter_w5
+.align 4
+ecp_nistz256_scatter_w5:
+ slwi $index,$index,2
+ add $out,$out,$index
+
+ ld r8, 0($inp) # X
+ ld r9, 8($inp)
+ ld r10,16($inp)
+ ld r11,24($inp)
+
+ stw r8, 64*0-4($out)
+ srdi r8, r8, 32
+ stw r9, 64*1-4($out)
+ srdi r9, r9, 32
+ stw r10,64*2-4($out)
+ srdi r10,r10,32
+ stw r11,64*3-4($out)
+ srdi r11,r11,32
+ stw r8, 64*4-4($out)
+ stw r9, 64*5-4($out)
+ stw r10,64*6-4($out)
+ stw r11,64*7-4($out)
+ addi $out,$out,64*8
+
+ ld r8, 32($inp) # Y
+ ld r9, 40($inp)
+ ld r10,48($inp)
+ ld r11,56($inp)
+
+ stw r8, 64*0-4($out)
+ srdi r8, r8, 32
+ stw r9, 64*1-4($out)
+ srdi r9, r9, 32
+ stw r10,64*2-4($out)
+ srdi r10,r10,32
+ stw r11,64*3-4($out)
+ srdi r11,r11,32
+ stw r8, 64*4-4($out)
+ stw r9, 64*5-4($out)
+ stw r10,64*6-4($out)
+ stw r11,64*7-4($out)
+ addi $out,$out,64*8
+
+ ld r8, 64($inp) # Z
+ ld r9, 72($inp)
+ ld r10,80($inp)
+ ld r11,88($inp)
+
+ stw r8, 64*0-4($out)
+ srdi r8, r8, 32
+ stw r9, 64*1-4($out)
+ srdi r9, r9, 32
+ stw r10,64*2-4($out)
+ srdi r10,r10,32
+ stw r11,64*3-4($out)
+ srdi r11,r11,32
+ stw r8, 64*4-4($out)
+ stw r9, 64*5-4($out)
+ stw r10,64*6-4($out)
+ stw r11,64*7-4($out)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size ecp_nistz256_scatter_w5,.-ecp_nistz256_scatter_w5
+
+########################################################################
+# void ecp_nistz256_gather_w5(P256_POINT *out, const void *inp,
+# int index);
+.globl ecp_nistz256_gather_w5
+.align 4
+ecp_nistz256_gather_w5:
+ neg r0,$index
+ sradi r0,r0,63
+
+ add $index,$index,r0
+ slwi $index,$index,2
+ add $inp,$inp,$index
+
+ lwz r5, 64*0($inp)
+ lwz r6, 64*1($inp)
+ lwz r7, 64*2($inp)
+ lwz r8, 64*3($inp)
+ lwz r9, 64*4($inp)
+ lwz r10,64*5($inp)
+ lwz r11,64*6($inp)
+ lwz r12,64*7($inp)
+ addi $inp,$inp,64*8
+ sldi r9, r9, 32
+ sldi r10,r10,32
+ sldi r11,r11,32
+ sldi r12,r12,32
+ or r5,r5,r9
+ or r6,r6,r10
+ or r7,r7,r11
+ or r8,r8,r12
+ and r5,r5,r0
+ and r6,r6,r0
+ and r7,r7,r0
+ and r8,r8,r0
+ std r5,0($out) # X
+ std r6,8($out)
+ std r7,16($out)
+ std r8,24($out)
+
+ lwz r5, 64*0($inp)
+ lwz r6, 64*1($inp)
+ lwz r7, 64*2($inp)
+ lwz r8, 64*3($inp)
+ lwz r9, 64*4($inp)
+ lwz r10,64*5($inp)
+ lwz r11,64*6($inp)
+ lwz r12,64*7($inp)
+ addi $inp,$inp,64*8
+ sldi r9, r9, 32
+ sldi r10,r10,32
+ sldi r11,r11,32
+ sldi r12,r12,32
+ or r5,r5,r9
+ or r6,r6,r10
+ or r7,r7,r11
+ or r8,r8,r12
+ and r5,r5,r0
+ and r6,r6,r0
+ and r7,r7,r0
+ and r8,r8,r0
+ std r5,32($out) # Y
+ std r6,40($out)
+ std r7,48($out)
+ std r8,56($out)
+
+ lwz r5, 64*0($inp)
+ lwz r6, 64*1($inp)
+ lwz r7, 64*2($inp)
+ lwz r8, 64*3($inp)
+ lwz r9, 64*4($inp)
+ lwz r10,64*5($inp)
+ lwz r11,64*6($inp)
+ lwz r12,64*7($inp)
+ sldi r9, r9, 32
+ sldi r10,r10,32
+ sldi r11,r11,32
+ sldi r12,r12,32
+ or r5,r5,r9
+ or r6,r6,r10
+ or r7,r7,r11
+ or r8,r8,r12
+ and r5,r5,r0
+ and r6,r6,r0
+ and r7,r7,r0
+ and r8,r8,r0
+ std r5,64($out) # Z
+ std r6,72($out)
+ std r7,80($out)
+ std r8,88($out)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
+
+########################################################################
+# void ecp_nistz256_scatter_w7(void *out, const P256_POINT_AFFINE *inp,
+# int index);
+.globl ecp_nistz256_scatter_w7
+.align 4
+ecp_nistz256_scatter_w7:
+ li r0,8
+ mtctr r0
+ add $out,$out,$index
+ subi $inp,$inp,8
+
+.Loop_scatter_w7:
+ ldu r0,8($inp)
+ stb r0,64*0($out)
+ srdi r0,r0,8
+ stb r0,64*1($out)
+ srdi r0,r0,8
+ stb r0,64*2($out)
+ srdi r0,r0,8
+ stb r0,64*3($out)
+ srdi r0,r0,8
+ stb r0,64*4($out)
+ srdi r0,r0,8
+ stb r0,64*5($out)
+ srdi r0,r0,8
+ stb r0,64*6($out)
+ srdi r0,r0,8
+ stb r0,64*7($out)
+ addi $out,$out,64*8
+ bdnz .Loop_scatter_w7
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size ecp_nistz256_scatter_w7,.-ecp_nistz256_scatter_w7
+
+########################################################################
+# void ecp_nistz256_gather_w7(P256_POINT_AFFINE *out, const void *inp,
+# int index);
+.globl ecp_nistz256_gather_w7
+.align 4
+ecp_nistz256_gather_w7:
+ li r0,8
+ mtctr r0
+ neg r0,$index
+ sradi r0,r0,63
+
+ add $index,$index,r0
+ add $inp,$inp,$index
+ subi $out,$out,8
+
+.Loop_gather_w7:
+ lbz r5, 64*0($inp)
+ lbz r6, 64*1($inp)
+ lbz r7, 64*2($inp)
+ lbz r8, 64*3($inp)
+ lbz r9, 64*4($inp)
+ lbz r10,64*5($inp)
+ lbz r11,64*6($inp)
+ lbz r12,64*7($inp)
+ addi $inp,$inp,64*8
+
+ sldi r6, r6, 8
+ sldi r7, r7, 16
+ sldi r8, r8, 24
+ sldi r9, r9, 32
+ sldi r10,r10,40
+ sldi r11,r11,48
+ sldi r12,r12,56
+
+ or r5,r5,r6
+ or r7,r7,r8
+ or r9,r9,r10
+ or r11,r11,r12
+ or r5,r5,r7
+ or r9,r9,r11
+ or r5,r5,r9
+ and r5,r5,r0
+ stdu r5,8($out)
+ bdnz .Loop_gather_w7
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
+___
+}
+
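The scatter/gather subroutines above store each table entry as a thin column inside rows of 64 bytes (or 64-byte groups of 32-bit words for the w5 flavour), so a gather walks the same sequence of rows whichever entry is selected, and an index of zero is masked to an all-zero result. The Python model below captures only the w7 idea; the exact index conventions are my assumption from reading the code, so treat it as illustrative.

    ROW = 64                                      # row stride == number of entries

    def scatter_w7(table, entry, col):            # col: 0..63, one byte column
        for row, byte in enumerate(entry):
            table[row * ROW + col] = byte

    def gather_w7(table, nbytes, index):          # index: 1..64; 0 -> all zeros
        mask = -(index != 0) & 0xff               # branch-free "point at infinity" mask
        col = (index - 1) & 0x3f
        return bytes(table[row * ROW + col] & mask for row in range(nbytes))

    tbl = bytearray(64 * ROW)
    scatter_w7(tbl, bytes(range(64)), col=5)
    assert gather_w7(tbl, 64, index=6) == bytes(range(64))
    assert gather_w7(tbl, 64, index=0) == bytes(64)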
+foreach (split("\n",$code)) {
+ s/\`([^\`]*)\`/eval $1/ge;
+
+ print $_,"\n";
+}
+close STDOUT; # enforce flush
diff --git a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-sparcv9.pl b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-sparcv9.pl
index 0c1af95b13..0a4def6e2b 100755
--- a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-sparcv9.pl
+++ b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-sparcv9.pl
@@ -413,7 +413,7 @@ __ecp_nistz256_add:
! if a+b >= modulus, subtract modulus.
!
! But since comparison implies subtraction, we subtract
- ! modulus and then add it back if subraction borrowed.
+ ! modulus and then add it back if subtraction borrowed.
subcc @acc[0],-1,@acc[0]
subccc @acc[1],-1,@acc[1]
@@ -1592,7 +1592,7 @@ ___
########################################################################
# Following subroutines are VIS3 counterparts of those above that
# implement ones found in ecp_nistz256.c. Key difference is that they
-# use 128-bit muliplication and addition with 64-bit carry, and in order
+# use 128-bit multiplication and addition with 64-bit carry, and in order
# to do that they perform conversion from uint32_t[8] to uint64_t[4] upon
# entry and vice versa on return.
#
@@ -1874,7 +1874,7 @@ $code.=<<___ if ($i<3);
ldx [$bp+8*($i+1)],$bi ! bp[$i+1]
___
$code.=<<___;
- addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
+ addcc $acc1,$t0,$acc1 ! accumulate high parts of multiplication
sllx $acc0,32,$t0
addxccc $acc2,$t1,$acc2
srlx $acc0,32,$t1
@@ -1977,7 +1977,7 @@ $code.=<<___;
srlx $acc0,32,$t1
addxccc $acc3,$t2,$acc2 ! +=acc[0]*0xFFFFFFFF00000001
sub $acc0,$t0,$t2 ! acc0*0xFFFFFFFF00000001, low part
- addxc %g0,$t3,$acc3 ! cant't overflow
+ addxc %g0,$t3,$acc3 ! can't overflow
___
}
$code.=<<___;
diff --git a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86.pl b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86.pl
index b3bec23228..0c6fc665bf 100755
--- a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86.pl
+++ b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86.pl
@@ -45,7 +45,7 @@ require "x86asm.pl";
$output=pop;
open STDOUT,">$output";
-&asm_init($ARGV[0],"ecp_nistz256-x86.pl",$ARGV[$#ARGV] eq "386");
+&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");
$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }
@@ -443,7 +443,7 @@ for(1..37) {
&mov (&DWP(20,"esp"),"eax");
&mov (&DWP(24,"esp"),"eax");
&mov (&DWP(28,"esp"),"eax");
-
+
&call ("_ecp_nistz256_sub");
&stack_pop(8);
diff --git a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86_64.pl b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86_64.pl
index 714e852a18..eba6ffd430 100755
--- a/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86_64.pl
+++ b/deps/openssl/openssl/crypto/ec/asm/ecp_nistz256-x86_64.pl
@@ -1,60 +1,44 @@
#! /usr/bin/env perl
-# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
+# Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.
+# Copyright (c) 2014, Intel Corporation. All Rights Reserved.
+# Copyright (c) 2015 CloudFlare, Inc.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
-
-
-##############################################################################
-# #
-# Copyright 2014 Intel Corporation #
-# #
-# Licensed under the Apache License, Version 2.0 (the "License"); #
-# you may not use this file except in compliance with the License. #
-# You may obtain a copy of the License at #
-# #
-# http://www.apache.org/licenses/LICENSE-2.0 #
-# #
-# Unless required by applicable law or agreed to in writing, software #
-# distributed under the License is distributed on an "AS IS" BASIS, #
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
-# See the License for the specific language governing permissions and #
-# limitations under the License. #
-# #
-##############################################################################
-# #
-# Developers and authors: #
-# Shay Gueron (1, 2), and Vlad Krasnov (1) #
-# (1) Intel Corporation, Israel Development Center #
-# (2) University of Haifa #
-# Reference: #
-# S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with#
-# 256 Bit Primes" #
-# #
-##############################################################################
+#
+# Originally written by Shay Gueron (1, 2), and Vlad Krasnov (1, 3)
+# (1) Intel Corporation, Israel Development Center, Haifa, Israel
+# (2) University of Haifa, Israel
+# (3) CloudFlare, Inc.
+#
+# Reference:
+# S.Gueron and V.Krasnov, "Fast Prime Field Elliptic Curve Cryptography with
+# 256 Bit Primes"
# Further optimization by <appro@openssl.org>:
#
# this/original with/without -DECP_NISTZ256_ASM(*)
-# Opteron +12-49% +110-150%
-# Bulldozer +14-45% +175-210%
-# P4 +18-46% n/a :-(
-# Westmere +12-34% +80-87%
-# Sandy Bridge +9-35% +110-120%
-# Ivy Bridge +9-35% +110-125%
-# Haswell +8-37% +140-160%
-# Broadwell +18-58% +145-210%
-# Atom +15-50% +130-180%
-# VIA Nano +43-160% +300-480%
+# Opteron +15-49% +150-195%
+# Bulldozer +18-45% +175-240%
+# P4 +24-46% +100-150%
+# Westmere +18-34% +87-160%
+# Sandy Bridge +14-35% +120-185%
+# Ivy Bridge +11-35% +125-180%
+# Haswell +10-37% +160-200%
+# Broadwell +24-58% +210-270%
+# Atom +20-50% +180-240%
+# VIA Nano +50-160% +480-480%
#
# (*) "without -DECP_NISTZ256_ASM" refers to build with
# "enable-ec_nistp_64_gcc_128";
#
# Ranges denote minimum and maximum improvement coefficients depending
-# on benchmark. Lower coefficients are for ECDSA sign, relatively fastest
-# server-side operation. Keep in mind that +100% means 2x improvement.
+# on benchmark. In "this/original" column lower coefficient is for
+# ECDSA sign, while in "with/without" - for ECDH key agreement, and
+# higher - for ECDSA sign, relatively fastest server-side operation.
+# Keep in mind that +100% means 2x improvement.
$flavour = shift;
$output = shift;
@@ -115,6 +99,12 @@ $code.=<<___;
.long 3,3,3,3,3,3,3,3
.LONE_mont:
.quad 0x0000000000000001, 0xffffffff00000000, 0xffffffffffffffff, 0x00000000fffffffe
+
+# Constants for computations modulo ord(p256)
+.Lord:
+.quad 0xf3b9cac2fc632551, 0xbce6faada7179e84, 0xffffffffffffffff, 0xffffffff00000000
+.LordK:
+.quad 0xccd1c8aaee00bc4f
___
{
@@ -131,8 +121,12 @@ $code.=<<___;
.type ecp_nistz256_mul_by_2,\@function,2
.align 64
ecp_nistz256_mul_by_2:
+.cfi_startproc
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
+.Lmul_by_2_body:
mov 8*0($a_ptr), $a0
xor $t4,$t4
@@ -165,9 +159,15 @@ ecp_nistz256_mul_by_2:
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
- pop %r13
- pop %r12
+ mov 0(%rsp),%r13
+.cfi_restore %r13
+ mov 8(%rsp),%r12
+.cfi_restore %r12
+ lea 16(%rsp),%rsp
+.cfi_adjust_cfa_offset -16
+.Lmul_by_2_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_mul_by_2,.-ecp_nistz256_mul_by_2
################################################################################
@@ -176,8 +176,12 @@ ecp_nistz256_mul_by_2:
.type ecp_nistz256_div_by_2,\@function,2
.align 32
ecp_nistz256_div_by_2:
+.cfi_startproc
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
+.Ldiv_by_2_body:
mov 8*0($a_ptr), $a0
mov 8*1($a_ptr), $a1
@@ -225,9 +229,15 @@ ecp_nistz256_div_by_2:
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
- pop %r13
- pop %r12
+ mov 0(%rsp),%r13
+.cfi_restore %r13
+ mov 8(%rsp),%r12
+.cfi_restore %r12
+ lea 16(%rsp),%rsp
+.cfi_adjust_cfa_offset -16
+.Ldiv_by_2_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_div_by_2,.-ecp_nistz256_div_by_2
################################################################################
@@ -236,8 +246,12 @@ ecp_nistz256_div_by_2:
.type ecp_nistz256_mul_by_3,\@function,2
.align 32
ecp_nistz256_mul_by_3:
+.cfi_startproc
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
+.Lmul_by_3_body:
mov 8*0($a_ptr), $a0
xor $t4, $t4
@@ -291,9 +305,15 @@ ecp_nistz256_mul_by_3:
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
- pop %r13
- pop %r12
+ mov 0(%rsp),%r13
+.cfi_restore %r13
+ mov 8(%rsp),%r12
+.cfi_restore %r12
+ lea 16(%rsp),%rsp
+.cfi_adjust_cfa_offset -16
+.Lmul_by_3_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_mul_by_3,.-ecp_nistz256_mul_by_3
################################################################################
@@ -302,8 +322,12 @@ ecp_nistz256_mul_by_3:
.type ecp_nistz256_add,\@function,3
.align 32
ecp_nistz256_add:
+.cfi_startproc
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
+.Ladd_body:
mov 8*0($a_ptr), $a0
xor $t4, $t4
@@ -337,9 +361,15 @@ ecp_nistz256_add:
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
- pop %r13
- pop %r12
+ mov 0(%rsp),%r13
+.cfi_restore %r13
+ mov 8(%rsp),%r12
+.cfi_restore %r12
+ lea 16(%rsp),%rsp
+.cfi_adjust_cfa_offset -16
+.Ladd_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_add,.-ecp_nistz256_add
################################################################################
@@ -348,8 +378,12 @@ ecp_nistz256_add:
.type ecp_nistz256_sub,\@function,3
.align 32
ecp_nistz256_sub:
+.cfi_startproc
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
+.Lsub_body:
mov 8*0($a_ptr), $a0
xor $t4, $t4
@@ -383,9 +417,15 @@ ecp_nistz256_sub:
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
- pop %r13
- pop %r12
+ mov 0(%rsp),%r13
+.cfi_restore %r13
+ mov 8(%rsp),%r12
+.cfi_restore %r12
+ lea 16(%rsp),%rsp
+.cfi_adjust_cfa_offset -16
+.Lsub_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_sub,.-ecp_nistz256_sub
################################################################################
@@ -394,8 +434,12 @@ ecp_nistz256_sub:
.type ecp_nistz256_neg,\@function,2
.align 32
ecp_nistz256_neg:
+.cfi_startproc
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
+.Lneg_body:
xor $a0, $a0
xor $a1, $a1
@@ -429,9 +473,15 @@ ecp_nistz256_neg:
mov $a2, 8*2($r_ptr)
mov $a3, 8*3($r_ptr)
- pop %r13
- pop %r12
+ mov 0(%rsp),%r13
+.cfi_restore %r13
+ mov 8(%rsp),%r12
+.cfi_restore %r12
+ lea 16(%rsp),%rsp
+.cfi_adjust_cfa_offset -16
+.Lneg_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_neg,.-ecp_nistz256_neg
___
}
@@ -443,6 +493,1085 @@ my ($poly1,$poly3)=($acc6,$acc7);
$code.=<<___;
################################################################################
+# void ecp_nistz256_ord_mul_mont(
+# uint64_t res[4],
+# uint64_t a[4],
+# uint64_t b[4]);
+
+.globl ecp_nistz256_ord_mul_mont
+.type ecp_nistz256_ord_mul_mont,\@function,3
+.align 32
+ecp_nistz256_ord_mul_mont:
+.cfi_startproc
+___
+$code.=<<___ if ($addx);
+ mov \$0x80100, %ecx
+ and OPENSSL_ia32cap_P+8(%rip), %ecx
+ cmp \$0x80100, %ecx
+ je .Lecp_nistz256_ord_mul_montx
+___
+$code.=<<___;
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lord_mul_body:
+
+ mov 8*0($b_org), %rax
+ mov $b_org, $b_ptr
+ lea .Lord(%rip), %r14
+ mov .LordK(%rip), %r15
+
+ ################################# * b[0]
+ mov %rax, $t0
+ mulq 8*0($a_ptr)
+ mov %rax, $acc0
+ mov $t0, %rax
+ mov %rdx, $acc1
+
+ mulq 8*1($a_ptr)
+ add %rax, $acc1
+ mov $t0, %rax
+ adc \$0, %rdx
+ mov %rdx, $acc2
+
+ mulq 8*2($a_ptr)
+ add %rax, $acc2
+ mov $t0, %rax
+ adc \$0, %rdx
+
+ mov $acc0, $acc5
+ imulq %r15,$acc0
+
+ mov %rdx, $acc3
+ mulq 8*3($a_ptr)
+ add %rax, $acc3
+ mov $acc0, %rax
+ adc \$0, %rdx
+ mov %rdx, $acc4
+
+ ################################# First reduction step
+ mulq 8*0(%r14)
+ mov $acc0, $t1
+ add %rax, $acc5 # guaranteed to be zero
+ mov $acc0, %rax
+ adc \$0, %rdx
+ mov %rdx, $t0
+
+ sub $acc0, $acc2
+ sbb \$0, $acc0 # can't borrow
+
+ mulq 8*1(%r14)
+ add $t0, $acc1
+ adc \$0, %rdx
+ add %rax, $acc1
+ mov $t1, %rax
+ adc %rdx, $acc2
+ mov $t1, %rdx
+ adc \$0, $acc0 # can't overflow
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc3
+ mov 8*1($b_ptr), %rax
+ sbb %rdx, $t1 # can't borrow
+
+ add $acc0, $acc3
+ adc $t1, $acc4
+ adc \$0, $acc5
+
+ ################################# * b[1]
+ mov %rax, $t0
+ mulq 8*0($a_ptr)
+ add %rax, $acc1
+ mov $t0, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mulq 8*1($a_ptr)
+ add $t1, $acc2
+ adc \$0, %rdx
+ add %rax, $acc2
+ mov $t0, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mulq 8*2($a_ptr)
+ add $t1, $acc3
+ adc \$0, %rdx
+ add %rax, $acc3
+ mov $t0, %rax
+ adc \$0, %rdx
+
+ mov $acc1, $t0
+ imulq %r15, $acc1
+
+ mov %rdx, $t1
+ mulq 8*3($a_ptr)
+ add $t1, $acc4
+ adc \$0, %rdx
+ xor $acc0, $acc0
+ add %rax, $acc4
+ mov $acc1, %rax
+ adc %rdx, $acc5
+ adc \$0, $acc0
+
+ ################################# Second reduction step
+ mulq 8*0(%r14)
+ mov $acc1, $t1
+ add %rax, $t0 # guaranteed to be zero
+ mov $acc1, %rax
+ adc %rdx, $t0
+
+ sub $acc1, $acc3
+ sbb \$0, $acc1 # can't borrow
+
+ mulq 8*1(%r14)
+ add $t0, $acc2
+ adc \$0, %rdx
+ add %rax, $acc2
+ mov $t1, %rax
+ adc %rdx, $acc3
+ mov $t1, %rdx
+ adc \$0, $acc1 # can't overflow
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc4
+ mov 8*2($b_ptr), %rax
+ sbb %rdx, $t1 # can't borrow
+
+ add $acc1, $acc4
+ adc $t1, $acc5
+ adc \$0, $acc0
+
+ ################################## * b[2]
+ mov %rax, $t0
+ mulq 8*0($a_ptr)
+ add %rax, $acc2
+ mov $t0, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mulq 8*1($a_ptr)
+ add $t1, $acc3
+ adc \$0, %rdx
+ add %rax, $acc3
+ mov $t0, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mulq 8*2($a_ptr)
+ add $t1, $acc4
+ adc \$0, %rdx
+ add %rax, $acc4
+ mov $t0, %rax
+ adc \$0, %rdx
+
+ mov $acc2, $t0
+ imulq %r15, $acc2
+
+ mov %rdx, $t1
+ mulq 8*3($a_ptr)
+ add $t1, $acc5
+ adc \$0, %rdx
+ xor $acc1, $acc1
+ add %rax, $acc5
+ mov $acc2, %rax
+ adc %rdx, $acc0
+ adc \$0, $acc1
+
+ ################################# Third reduction step
+ mulq 8*0(%r14)
+ mov $acc2, $t1
+ add %rax, $t0 # guaranteed to be zero
+ mov $acc2, %rax
+ adc %rdx, $t0
+
+ sub $acc2, $acc4
+ sbb \$0, $acc2 # can't borrow
+
+ mulq 8*1(%r14)
+ add $t0, $acc3
+ adc \$0, %rdx
+ add %rax, $acc3
+ mov $t1, %rax
+ adc %rdx, $acc4
+ mov $t1, %rdx
+ adc \$0, $acc2 # can't overflow
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc5
+ mov 8*3($b_ptr), %rax
+ sbb %rdx, $t1 # can't borrow
+
+ add $acc2, $acc5
+ adc $t1, $acc0
+ adc \$0, $acc1
+
+ ################################# * b[3]
+ mov %rax, $t0
+ mulq 8*0($a_ptr)
+ add %rax, $acc3
+ mov $t0, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mulq 8*1($a_ptr)
+ add $t1, $acc4
+ adc \$0, %rdx
+ add %rax, $acc4
+ mov $t0, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mulq 8*2($a_ptr)
+ add $t1, $acc5
+ adc \$0, %rdx
+ add %rax, $acc5
+ mov $t0, %rax
+ adc \$0, %rdx
+
+ mov $acc3, $t0
+ imulq %r15, $acc3
+
+ mov %rdx, $t1
+ mulq 8*3($a_ptr)
+ add $t1, $acc0
+ adc \$0, %rdx
+ xor $acc2, $acc2
+ add %rax, $acc0
+ mov $acc3, %rax
+ adc %rdx, $acc1
+ adc \$0, $acc2
+
+ ################################# Last reduction step
+ mulq 8*0(%r14)
+ mov $acc3, $t1
+ add %rax, $t0 # guaranteed to be zero
+ mov $acc3, %rax
+ adc %rdx, $t0
+
+ sub $acc3, $acc5
+ sbb \$0, $acc3 # can't borrow
+
+ mulq 8*1(%r14)
+ add $t0, $acc4
+ adc \$0, %rdx
+ add %rax, $acc4
+ mov $t1, %rax
+ adc %rdx, $acc5
+ mov $t1, %rdx
+ adc \$0, $acc3 # can't overflow
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc0
+ sbb %rdx, $t1 # can't borrow
+
+ add $acc3, $acc0
+ adc $t1, $acc1
+ adc \$0, $acc2
+
+ ################################# Subtract ord
+ mov $acc4, $a_ptr
+ sub 8*0(%r14), $acc4
+ mov $acc5, $acc3
+ sbb 8*1(%r14), $acc5
+ mov $acc0, $t0
+ sbb 8*2(%r14), $acc0
+ mov $acc1, $t1
+ sbb 8*3(%r14), $acc1
+ sbb \$0, $acc2
+
+ cmovc $a_ptr, $acc4
+ cmovc $acc3, $acc5
+ cmovc $t0, $acc0
+ cmovc $t1, $acc1
+
+ mov $acc4, 8*0($r_ptr)
+ mov $acc5, 8*1($r_ptr)
+ mov $acc0, 8*2($r_ptr)
+ mov $acc1, 8*3($r_ptr)
+
+ mov 0(%rsp),%r15
+.cfi_restore %r15
+ mov 8(%rsp),%r14
+.cfi_restore %r14
+ mov 16(%rsp),%r13
+.cfi_restore %r13
+ mov 24(%rsp),%r12
+.cfi_restore %r12
+ mov 32(%rsp),%rbx
+.cfi_restore %rbx
+ mov 40(%rsp),%rbp
+.cfi_restore %rbp
+ lea 48(%rsp),%rsp
+.cfi_adjust_cfa_offset -48
+.Lord_mul_epilogue:
+ ret
+.cfi_endproc
+.size ecp_nistz256_ord_mul_mont,.-ecp_nistz256_ord_mul_mont
+
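In the reduction steps above, only the two low limbs of .Lord are applied with mulq; the two high limbs, 0xffffffffffffffff and 0xffffffff00000000, are folded in with the sub/sbb and shl/shr/sub sequences instead. That is my reading of those sequences; the shift-and-subtract identities they rely on are plain arithmetic and can be checked directly:

    for m in (0, 1, 0xdeadbeefcafebabe, (1 << 64) - 1):
        assert m * 0xffffffffffffffff == (m << 64) - m          # limb = 2^64 - 1
        assert m * 0xffffffff00000000 == (m << 64) - (m << 32)  # limb = 2^64 - 2^32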
+################################################################################
+# void ecp_nistz256_ord_sqr_mont(
+# uint64_t res[4],
+# uint64_t a[4],
+# int rep);
+
+.globl ecp_nistz256_ord_sqr_mont
+.type ecp_nistz256_ord_sqr_mont,\@function,3
+.align 32
+ecp_nistz256_ord_sqr_mont:
+.cfi_startproc
+___
+$code.=<<___ if ($addx);
+ mov \$0x80100, %ecx
+ and OPENSSL_ia32cap_P+8(%rip), %ecx
+ cmp \$0x80100, %ecx
+ je .Lecp_nistz256_ord_sqr_montx
+___
+$code.=<<___;
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lord_sqr_body:
+
+ mov 8*0($a_ptr), $acc0
+ mov 8*1($a_ptr), %rax
+ mov 8*2($a_ptr), $acc6
+ mov 8*3($a_ptr), $acc7
+ lea .Lord(%rip), $a_ptr # pointer to modulus
+ mov $b_org, $b_ptr
+ jmp .Loop_ord_sqr
+
+.align 32
+.Loop_ord_sqr:
+ ################################# a[1:] * a[0]
+ mov %rax, $t1 # put aside a[1]
+ mul $acc0 # a[1] * a[0]
+ mov %rax, $acc1
+ movq $t1, %xmm1 # offload a[1]
+ mov $acc6, %rax
+ mov %rdx, $acc2
+
+ mul $acc0 # a[2] * a[0]
+ add %rax, $acc2
+ mov $acc7, %rax
+ movq $acc6, %xmm2 # offload a[2]
+ adc \$0, %rdx
+ mov %rdx, $acc3
+
+ mul $acc0 # a[3] * a[0]
+ add %rax, $acc3
+ mov $acc7, %rax
+ movq $acc7, %xmm3 # offload a[3]
+ adc \$0, %rdx
+ mov %rdx, $acc4
+
+ ################################# a[3] * a[2]
+ mul $acc6 # a[3] * a[2]
+ mov %rax, $acc5
+ mov $acc6, %rax
+ mov %rdx, $acc6
+
+ ################################# a[2:] * a[1]
+ mul $t1 # a[2] * a[1]
+ add %rax, $acc3
+ mov $acc7, %rax
+ adc \$0, %rdx
+ mov %rdx, $acc7
+
+ mul $t1 # a[3] * a[1]
+ add %rax, $acc4
+ adc \$0, %rdx
+
+ add $acc7, $acc4
+ adc %rdx, $acc5
+ adc \$0, $acc6 # can't overflow
+
+ ################################# *2
+ xor $acc7, $acc7
+ mov $acc0, %rax
+ add $acc1, $acc1
+ adc $acc2, $acc2
+ adc $acc3, $acc3
+ adc $acc4, $acc4
+ adc $acc5, $acc5
+ adc $acc6, $acc6
+ adc \$0, $acc7
+
+ ################################# Missing products
+ mul %rax # a[0] * a[0]
+ mov %rax, $acc0
+ movq %xmm1, %rax
+ mov %rdx, $t1
+
+ mul %rax # a[1] * a[1]
+ add $t1, $acc1
+ adc %rax, $acc2
+ movq %xmm2, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mul %rax # a[2] * a[2]
+ add $t1, $acc3
+ adc %rax, $acc4
+ movq %xmm3, %rax
+ adc \$0, %rdx
+ mov %rdx, $t1
+
+ mov $acc0, $t0
+ imulq 8*4($a_ptr), $acc0 # *= .LordK
+
+ mul %rax # a[3] * a[3]
+ add $t1, $acc5
+ adc %rax, $acc6
+ mov 8*0($a_ptr), %rax # modulus[0]
+ adc %rdx, $acc7 # can't overflow
+
+ ################################# First reduction step
+ mul $acc0
+ mov $acc0, $t1
+ add %rax, $t0 # guaranteed to be zero
+ mov 8*1($a_ptr), %rax # modulus[1]
+ adc %rdx, $t0
+
+ sub $acc0, $acc2
+ sbb \$0, $t1 # can't borrow
+
+ mul $acc0
+ add $t0, $acc1
+ adc \$0, %rdx
+ add %rax, $acc1
+ mov $acc0, %rax
+ adc %rdx, $acc2
+ mov $acc0, %rdx
+ adc \$0, $t1 # can't overflow
+
+ mov $acc1, $t0
+ imulq 8*4($a_ptr), $acc1 # *= .LordK
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc3
+ mov 8*0($a_ptr), %rax
+ sbb %rdx, $acc0 # can't borrow
+
+ add $t1, $acc3
+ adc \$0, $acc0 # can't overflow
+
+ ################################# Second reduction step
+ mul $acc1
+ mov $acc1, $t1
+ add %rax, $t0 # guaranteed to be zero
+ mov 8*1($a_ptr), %rax
+ adc %rdx, $t0
+
+ sub $acc1, $acc3
+ sbb \$0, $t1 # can't borrow
+
+ mul $acc1
+ add $t0, $acc2
+ adc \$0, %rdx
+ add %rax, $acc2
+ mov $acc1, %rax
+ adc %rdx, $acc3
+ mov $acc1, %rdx
+ adc \$0, $t1 # can't overflow
+
+ mov $acc2, $t0
+ imulq 8*4($a_ptr), $acc2 # *= .LordK
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc0
+ mov 8*0($a_ptr), %rax
+ sbb %rdx, $acc1 # can't borrow
+
+ add $t1, $acc0
+ adc \$0, $acc1 # can't overflow
+
+ ################################# Third reduction step
+ mul $acc2
+ mov $acc2, $t1
+ add %rax, $t0 # guaranteed to be zero
+ mov 8*1($a_ptr), %rax
+ adc %rdx, $t0
+
+ sub $acc2, $acc0
+ sbb \$0, $t1 # can't borrow
+
+ mul $acc2
+ add $t0, $acc3
+ adc \$0, %rdx
+ add %rax, $acc3
+ mov $acc2, %rax
+ adc %rdx, $acc0
+ mov $acc2, %rdx
+ adc \$0, $t1 # can't overflow
+
+ mov $acc3, $t0
+ imulq 8*4($a_ptr), $acc3 # *= .LordK
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc1
+ mov 8*0($a_ptr), %rax
+ sbb %rdx, $acc2 # can't borrow
+
+ add $t1, $acc1
+ adc \$0, $acc2 # can't overflow
+
+ ################################# Last reduction step
+ mul $acc3
+ mov $acc3, $t1
+ add %rax, $t0 # guaranteed to be zero
+ mov 8*1($a_ptr), %rax
+ adc %rdx, $t0
+
+ sub $acc3, $acc1
+ sbb \$0, $t1 # can't borrow
+
+ mul $acc3
+ add $t0, $acc0
+ adc \$0, %rdx
+ add %rax, $acc0
+ mov $acc3, %rax
+ adc %rdx, $acc1
+ mov $acc3, %rdx
+ adc \$0, $t1 # can't overflow
+
+ shl \$32, %rax
+ shr \$32, %rdx
+ sub %rax, $acc2
+ sbb %rdx, $acc3 # can't borrow
+
+ add $t1, $acc2
+ adc \$0, $acc3 # can't overflow
+
+ ################################# Add bits [511:256] of the sqr result
+ xor %rdx, %rdx
+ add $acc4, $acc0
+ adc $acc5, $acc1
+ mov $acc0, $acc4
+ adc $acc6, $acc2
+ adc $acc7, $acc3
+ mov $acc1, %rax
+ adc \$0, %rdx
+
+ ################################# Compare to modulus
+ sub 8*0($a_ptr), $acc0
+ mov $acc2, $acc6
+ sbb 8*1($a_ptr), $acc1
+ sbb 8*2($a_ptr), $acc2
+ mov $acc3, $acc7
+ sbb 8*3($a_ptr), $acc3
+ sbb \$0, %rdx
+
+ cmovc $acc4, $acc0
+ cmovnc $acc1, %rax
+ cmovnc $acc2, $acc6
+ cmovnc $acc3, $acc7
+
+ dec $b_ptr
+ jnz .Loop_ord_sqr
+
+ mov $acc0, 8*0($r_ptr)
+ mov %rax, 8*1($r_ptr)
+ pxor %xmm1, %xmm1
+ mov $acc6, 8*2($r_ptr)
+ pxor %xmm2, %xmm2
+ mov $acc7, 8*3($r_ptr)
+ pxor %xmm3, %xmm3
+
+ mov 0(%rsp),%r15
+.cfi_restore %r15
+ mov 8(%rsp),%r14
+.cfi_restore %r14
+ mov 16(%rsp),%r13
+.cfi_restore %r13
+ mov 24(%rsp),%r12
+.cfi_restore %r12
+ mov 32(%rsp),%rbx
+.cfi_restore %rbx
+ mov 40(%rsp),%rbp
+.cfi_restore %rbp
+ lea 48(%rsp),%rsp
+.cfi_adjust_cfa_offset -48
+.Lord_sqr_epilogue:
+ ret
+.cfi_endproc
+.size ecp_nistz256_ord_sqr_mont,.-ecp_nistz256_ord_sqr_mont
+___
+
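The squaring above computes each cross product a[i]*a[j] with i<j once, doubles the accumulated partial sum, then adds the diagonal squares ("missing products"), before running the same four reduction steps as the multiplication. A quick Python check of the identity that schedule relies on (illustrative only):

    # (sum a_i*2^(64i))^2 = 2*sum_{i<j} a_i*a_j*2^(64(i+j)) + sum a_i^2*2^(128i)
    import random
    limbs = [random.getrandbits(64) for _ in range(4)]
    value = sum(l << (64 * i) for i, l in enumerate(limbs))
    cross = sum(limbs[i] * limbs[j] << (64 * (i + j))
                for i in range(4) for j in range(i + 1, 4))
    diag = sum(limbs[i] * limbs[i] << (128 * i) for i in range(4))
    assert 2 * cross + diag == value * value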
+$code.=<<___ if ($addx);
+################################################################################
+.type ecp_nistz256_ord_mul_montx,\@function,3
+.align 32
+ecp_nistz256_ord_mul_montx:
+.cfi_startproc
+.Lecp_nistz256_ord_mul_montx:
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lord_mulx_body:
+
+ mov $b_org, $b_ptr
+ mov 8*0($b_org), %rdx
+ mov 8*0($a_ptr), $acc1
+ mov 8*1($a_ptr), $acc2
+ mov 8*2($a_ptr), $acc3
+ mov 8*3($a_ptr), $acc4
+ lea -128($a_ptr), $a_ptr # control u-op density
+ lea .Lord-128(%rip), %r14
+ mov .LordK(%rip), %r15
+
+ ################################# Multiply by b[0]
+ mulx $acc1, $acc0, $acc1
+ mulx $acc2, $t0, $acc2
+ mulx $acc3, $t1, $acc3
+ add $t0, $acc1
+ mulx $acc4, $t0, $acc4
+ mov $acc0, %rdx
+ mulx %r15, %rdx, %rax
+ adc $t1, $acc2
+ adc $t0, $acc3
+ adc \$0, $acc4
+
+ ################################# reduction
+ xor $acc5, $acc5 # $acc5=0, cf=0, of=0
+ mulx 8*0+128(%r14), $t0, $t1
+ adcx $t0, $acc0 # guaranteed to be zero
+ adox $t1, $acc1
+
+ mulx 8*1+128(%r14), $t0, $t1
+ adcx $t0, $acc1
+ adox $t1, $acc2
+
+ mulx 8*2+128(%r14), $t0, $t1
+ adcx $t0, $acc2
+ adox $t1, $acc3
+
+ mulx 8*3+128(%r14), $t0, $t1
+ mov 8*1($b_ptr), %rdx
+ adcx $t0, $acc3
+ adox $t1, $acc4
+ adcx $acc0, $acc4
+ adox $acc0, $acc5
+ adc \$0, $acc5 # cf=0, of=0
+
+ ################################# Multiply by b[1]
+ mulx 8*0+128($a_ptr), $t0, $t1
+ adcx $t0, $acc1
+ adox $t1, $acc2
+
+ mulx 8*1+128($a_ptr), $t0, $t1
+ adcx $t0, $acc2
+ adox $t1, $acc3
+
+ mulx 8*2+128($a_ptr), $t0, $t1
+ adcx $t0, $acc3
+ adox $t1, $acc4
+
+ mulx 8*3+128($a_ptr), $t0, $t1
+ mov $acc1, %rdx
+ mulx %r15, %rdx, %rax
+ adcx $t0, $acc4
+ adox $t1, $acc5
+
+ adcx $acc0, $acc5
+ adox $acc0, $acc0
+ adc \$0, $acc0 # cf=0, of=0
+
+ ################################# reduction
+ mulx 8*0+128(%r14), $t0, $t1
+ adcx $t0, $acc1 # guaranteed to be zero
+ adox $t1, $acc2
+
+ mulx 8*1+128(%r14), $t0, $t1
+ adcx $t0, $acc2
+ adox $t1, $acc3
+
+ mulx 8*2+128(%r14), $t0, $t1
+ adcx $t0, $acc3
+ adox $t1, $acc4
+
+ mulx 8*3+128(%r14), $t0, $t1
+ mov 8*2($b_ptr), %rdx
+ adcx $t0, $acc4
+ adox $t1, $acc5
+ adcx $acc1, $acc5
+ adox $acc1, $acc0
+ adc \$0, $acc0 # cf=0, of=0
+
+ ################################# Multiply by b[2]
+ mulx 8*0+128($a_ptr), $t0, $t1
+ adcx $t0, $acc2
+ adox $t1, $acc3
+
+ mulx 8*1+128($a_ptr), $t0, $t1
+ adcx $t0, $acc3
+ adox $t1, $acc4
+
+ mulx 8*2+128($a_ptr), $t0, $t1
+ adcx $t0, $acc4
+ adox $t1, $acc5
+
+ mulx 8*3+128($a_ptr), $t0, $t1
+ mov $acc2, %rdx
+ mulx %r15, %rdx, %rax
+ adcx $t0, $acc5
+ adox $t1, $acc0
+
+ adcx $acc1, $acc0
+ adox $acc1, $acc1
+ adc \$0, $acc1 # cf=0, of=0
+
+ ################################# reduction
+ mulx 8*0+128(%r14), $t0, $t1
+ adcx $t0, $acc2 # guaranteed to be zero
+ adox $t1, $acc3
+
+ mulx 8*1+128(%r14), $t0, $t1
+ adcx $t0, $acc3
+ adox $t1, $acc4
+
+ mulx 8*2+128(%r14), $t0, $t1
+ adcx $t0, $acc4
+ adox $t1, $acc5
+
+ mulx 8*3+128(%r14), $t0, $t1
+ mov 8*3($b_ptr), %rdx
+ adcx $t0, $acc5
+ adox $t1, $acc0
+ adcx $acc2, $acc0
+ adox $acc2, $acc1
+ adc \$0, $acc1 # cf=0, of=0
+
+ ################################# Multiply by b[3]
+ mulx 8*0+128($a_ptr), $t0, $t1
+ adcx $t0, $acc3
+ adox $t1, $acc4
+
+ mulx 8*1+128($a_ptr), $t0, $t1
+ adcx $t0, $acc4
+ adox $t1, $acc5
+
+ mulx 8*2+128($a_ptr), $t0, $t1
+ adcx $t0, $acc5
+ adox $t1, $acc0
+
+ mulx 8*3+128($a_ptr), $t0, $t1
+ mov $acc3, %rdx
+ mulx %r15, %rdx, %rax
+ adcx $t0, $acc0
+ adox $t1, $acc1
+
+ adcx $acc2, $acc1
+ adox $acc2, $acc2
+ adc \$0, $acc2 # cf=0, of=0
+
+ ################################# reduction
+ mulx 8*0+128(%r14), $t0, $t1
+ adcx $t0, $acc3 # guaranteed to be zero
+ adox $t1, $acc4
+
+ mulx 8*1+128(%r14), $t0, $t1
+ adcx $t0, $acc4
+ adox $t1, $acc5
+
+ mulx 8*2+128(%r14), $t0, $t1
+ adcx $t0, $acc5
+ adox $t1, $acc0
+
+ mulx 8*3+128(%r14), $t0, $t1
+ lea 128(%r14),%r14
+ mov $acc4, $t2
+ adcx $t0, $acc0
+ adox $t1, $acc1
+ mov $acc5, $t3
+ adcx $acc3, $acc1
+ adox $acc3, $acc2
+ adc \$0, $acc2
+
+ #################################
+ # Branch-less conditional subtraction of P
+ mov $acc0, $t0
+ sub 8*0(%r14), $acc4
+ sbb 8*1(%r14), $acc5
+ sbb 8*2(%r14), $acc0
+ mov $acc1, $t1
+ sbb 8*3(%r14), $acc1
+ sbb \$0, $acc2
+
+ cmovc $t2, $acc4
+ cmovc $t3, $acc5
+ cmovc $t0, $acc0
+ cmovc $t1, $acc1
+
+ mov $acc4, 8*0($r_ptr)
+ mov $acc5, 8*1($r_ptr)
+ mov $acc0, 8*2($r_ptr)
+ mov $acc1, 8*3($r_ptr)
+
+ mov 0(%rsp),%r15
+.cfi_restore %r15
+ mov 8(%rsp),%r14
+.cfi_restore %r14
+ mov 16(%rsp),%r13
+.cfi_restore %r13
+ mov 24(%rsp),%r12
+.cfi_restore %r12
+ mov 32(%rsp),%rbx
+.cfi_restore %rbx
+ mov 40(%rsp),%rbp
+.cfi_restore %rbp
+ lea 48(%rsp),%rsp
+.cfi_adjust_cfa_offset -48
+.Lord_mulx_epilogue:
+ ret
+.cfi_endproc
+.size ecp_nistz256_ord_mul_montx,.-ecp_nistz256_ord_mul_montx
+
+.type ecp_nistz256_ord_sqr_montx,\@function,3
+.align 32
+ecp_nistz256_ord_sqr_montx:
+.cfi_startproc
+.Lecp_nistz256_ord_sqr_montx:
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+.Lord_sqrx_body:
+
+ mov $b_org, $b_ptr
+ mov 8*0($a_ptr), %rdx
+ mov 8*1($a_ptr), $acc6
+ mov 8*2($a_ptr), $acc7
+ mov 8*3($a_ptr), $acc0
+ lea .Lord(%rip), $a_ptr
+ jmp .Loop_ord_sqrx
+
+.align 32
+.Loop_ord_sqrx:
+ mulx $acc6, $acc1, $acc2 # a[0]*a[1]
+ mulx $acc7, $t0, $acc3 # a[0]*a[2]
+ mov %rdx, %rax # offload a[0]
+ movq $acc6, %xmm1 # offload a[1]
+ mulx $acc0, $t1, $acc4 # a[0]*a[3]
+ mov $acc6, %rdx
+ add $t0, $acc2
+ movq $acc7, %xmm2 # offload a[2]
+ adc $t1, $acc3
+ adc \$0, $acc4
+ xor $acc5, $acc5 # $acc5=0,cf=0,of=0
+ #################################
+ mulx $acc7, $t0, $t1 # a[1]*a[2]
+ adcx $t0, $acc3
+ adox $t1, $acc4
+
+ mulx $acc0, $t0, $t1 # a[1]*a[3]
+ mov $acc7, %rdx
+ adcx $t0, $acc4
+ adox $t1, $acc5
+ adc \$0, $acc5
+ #################################
+ mulx $acc0, $t0, $acc6 # a[2]*a[3]
+ mov %rax, %rdx
+ movq $acc0, %xmm3 # offload a[3]
+ xor $acc7, $acc7 # $acc7=0,cf=0,of=0
+ adcx $acc1, $acc1 # acc1:6<<1
+ adox $t0, $acc5
+ adcx $acc2, $acc2
+ adox $acc7, $acc6 # of=0
+
+ ################################# a[i]*a[i]
+ mulx %rdx, $acc0, $t1
+ movq %xmm1, %rdx
+ adcx $acc3, $acc3
+ adox $t1, $acc1
+ adcx $acc4, $acc4
+ mulx %rdx, $t0, $t4
+ movq %xmm2, %rdx
+ adcx $acc5, $acc5
+ adox $t0, $acc2
+ adcx $acc6, $acc6
+ mulx %rdx, $t0, $t1
+ .byte 0x67
+ movq %xmm3, %rdx
+ adox $t4, $acc3
+ adcx $acc7, $acc7
+ adox $t0, $acc4
+ adox $t1, $acc5
+ mulx %rdx, $t0, $t4
+ adox $t0, $acc6
+ adox $t4, $acc7
+
+ ################################# reduction
+ mov $acc0, %rdx
+ mulx 8*4($a_ptr), %rdx, $t0
+
+ xor %rax, %rax # cf=0, of=0
+ mulx 8*0($a_ptr), $t0, $t1
+ adcx $t0, $acc0 # guaranteed to be zero
+ adox $t1, $acc1
+ mulx 8*1($a_ptr), $t0, $t1
+ adcx $t0, $acc1
+ adox $t1, $acc2
+ mulx 8*2($a_ptr), $t0, $t1
+ adcx $t0, $acc2
+ adox $t1, $acc3
+ mulx 8*3($a_ptr), $t0, $t1
+ adcx $t0, $acc3
+ adox $t1, $acc0 # of=0
+ adcx %rax, $acc0 # cf=0
+
+ #################################
+ mov $acc1, %rdx
+ mulx 8*4($a_ptr), %rdx, $t0
+
+ mulx 8*0($a_ptr), $t0, $t1
+ adox $t0, $acc1 # guaranteed to be zero
+ adcx $t1, $acc2
+ mulx 8*1($a_ptr), $t0, $t1
+ adox $t0, $acc2
+ adcx $t1, $acc3
+ mulx 8*2($a_ptr), $t0, $t1
+ adox $t0, $acc3
+ adcx $t1, $acc0
+ mulx 8*3($a_ptr), $t0, $t1
+ adox $t0, $acc0
+ adcx $t1, $acc1 # cf=0
+ adox %rax, $acc1 # of=0
+
+ #################################
+ mov $acc2, %rdx
+ mulx 8*4($a_ptr), %rdx, $t0
+
+ mulx 8*0($a_ptr), $t0, $t1
+ adcx $t0, $acc2 # guaranteed to be zero
+ adox $t1, $acc3
+ mulx 8*1($a_ptr), $t0, $t1
+ adcx $t0, $acc3
+ adox $t1, $acc0
+ mulx 8*2($a_ptr), $t0, $t1
+ adcx $t0, $acc0
+ adox $t1, $acc1
+ mulx 8*3($a_ptr), $t0, $t1
+ adcx $t0, $acc1
+ adox $t1, $acc2 # of=0
+ adcx %rax, $acc2 # cf=0
+
+ #################################
+ mov $acc3, %rdx
+ mulx 8*4($a_ptr), %rdx, $t0
+
+ mulx 8*0($a_ptr), $t0, $t1
+ adox $t0, $acc3 # guaranteed to be zero
+ adcx $t1, $acc0
+ mulx 8*1($a_ptr), $t0, $t1
+ adox $t0, $acc0
+ adcx $t1, $acc1
+ mulx 8*2($a_ptr), $t0, $t1
+ adox $t0, $acc1
+ adcx $t1, $acc2
+ mulx 8*3($a_ptr), $t0, $t1
+ adox $t0, $acc2
+ adcx $t1, $acc3
+ adox %rax, $acc3
+
+ ################################# accumulate upper half
+ add $acc0, $acc4 # add $acc4, $acc0
+ adc $acc5, $acc1
+ mov $acc4, %rdx
+ adc $acc6, $acc2
+ adc $acc7, $acc3
+ mov $acc1, $acc6
+ adc \$0, %rax
+
+ ################################# compare to modulus
+ sub 8*0($a_ptr), $acc4
+ mov $acc2, $acc7
+ sbb 8*1($a_ptr), $acc1
+ sbb 8*2($a_ptr), $acc2
+ mov $acc3, $acc0
+ sbb 8*3($a_ptr), $acc3
+ sbb \$0, %rax
+
+ cmovnc $acc4, %rdx
+ cmovnc $acc1, $acc6
+ cmovnc $acc2, $acc7
+ cmovnc $acc3, $acc0
+
+ dec $b_ptr
+ jnz .Loop_ord_sqrx
+
+ mov %rdx, 8*0($r_ptr)
+ mov $acc6, 8*1($r_ptr)
+ pxor %xmm1, %xmm1
+ mov $acc7, 8*2($r_ptr)
+ pxor %xmm2, %xmm2
+ mov $acc0, 8*3($r_ptr)
+ pxor %xmm3, %xmm3
+
+ mov 0(%rsp),%r15
+.cfi_restore %r15
+ mov 8(%rsp),%r14
+.cfi_restore %r14
+ mov 16(%rsp),%r13
+.cfi_restore %r13
+ mov 24(%rsp),%r12
+.cfi_restore %r12
+ mov 32(%rsp),%rbx
+.cfi_restore %rbx
+ mov 40(%rsp),%rbp
+.cfi_restore %rbp
+ lea 48(%rsp),%rsp
+.cfi_adjust_cfa_offset -48
+.Lord_sqrx_epilogue:
+ ret
+.cfi_endproc
+.size ecp_nistz256_ord_sqr_montx,.-ecp_nistz256_ord_sqr_montx
+___
+
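The montx variants above are only reached when the 0x80100 mask survives the test against OPENSSL_ia32cap_P+8. My decoding of that mask, stated as an assumption: it combines CPUID.(EAX=7):EBX bit 8 (BMI2, needed for mulx) and bit 19 (ADX, needed for adcx/adox), so both dual-carry-chain instructions are available.

    BMI2, ADX = 1 << 8, 1 << 19
    assert BMI2 | ADX == 0x80100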
+$code.=<<___;
+################################################################################
# void ecp_nistz256_to_mont(
# uint64_t res[4],
# uint64_t in[4]);
@@ -470,6 +1599,7 @@ $code.=<<___;
.type ecp_nistz256_mul_mont,\@function,3
.align 32
ecp_nistz256_mul_mont:
+.cfi_startproc
___
$code.=<<___ if ($addx);
mov \$0x80100, %ecx
@@ -478,11 +1608,18 @@ ___
$code.=<<___;
.Lmul_mont:
push %rbp
+.cfi_push %rbp
push %rbx
+.cfi_push %rbx
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
+.Lmul_body:
___
$code.=<<___ if ($addx);
cmp \$0x80100, %ecx
@@ -515,13 +1652,23 @@ $code.=<<___ if ($addx);
___
$code.=<<___;
.Lmul_mont_done:
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbx
- pop %rbp
+ mov 0(%rsp),%r15
+.cfi_restore %r15
+ mov 8(%rsp),%r14
+.cfi_restore %r14
+ mov 16(%rsp),%r13
+.cfi_restore %r13
+ mov 24(%rsp),%r12
+.cfi_restore %r12
+ mov 32(%rsp),%rbx
+.cfi_restore %rbx
+ mov 40(%rsp),%rbp
+.cfi_restore %rbp
+ lea 48(%rsp),%rsp
+.cfi_adjust_cfa_offset -48
+.Lmul_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_mul_mont,.-ecp_nistz256_mul_mont
.type __ecp_nistz256_mul_montq,\@abi-omnipotent
@@ -611,7 +1758,7 @@ __ecp_nistz256_mul_montq:
adc \$0, $acc0
########################################################################
- # Second reduction step
+ # Second reduction step
mov $acc1, $t1
shl \$32, $acc1
mulq $poly3
@@ -658,7 +1805,7 @@ __ecp_nistz256_mul_montq:
adc \$0, $acc1
########################################################################
- # Third reduction step
+ # Third reduction step
mov $acc2, $t1
shl \$32, $acc2
mulq $poly3
@@ -705,7 +1852,7 @@ __ecp_nistz256_mul_montq:
adc \$0, $acc2
########################################################################
- # Final reduction step
+ # Final reduction step
mov $acc3, $t1
shl \$32, $acc3
mulq $poly3
@@ -718,7 +1865,7 @@ __ecp_nistz256_mul_montq:
mov $acc5, $t1
adc \$0, $acc2
- ########################################################################
+ ########################################################################
# Branch-less conditional subtraction of P
sub \$-1, $acc4 # .Lpoly[0]
mov $acc0, $t2
@@ -751,6 +1898,7 @@ __ecp_nistz256_mul_montq:
.type ecp_nistz256_sqr_mont,\@function,2
.align 32
ecp_nistz256_sqr_mont:
+.cfi_startproc
___
$code.=<<___ if ($addx);
mov \$0x80100, %ecx
@@ -758,11 +1906,18 @@ $code.=<<___ if ($addx);
___
$code.=<<___;
push %rbp
+.cfi_push %rbp
push %rbx
+.cfi_push %rbx
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
+.Lsqr_body:
___
$code.=<<___ if ($addx);
cmp \$0x80100, %ecx
@@ -791,13 +1946,23 @@ $code.=<<___ if ($addx);
___
$code.=<<___;
.Lsqr_mont_done:
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbx
- pop %rbp
+ mov 0(%rsp),%r15
+.cfi_restore %r15
+ mov 8(%rsp),%r14
+.cfi_restore %r14
+ mov 16(%rsp),%r13
+.cfi_restore %r13
+ mov 24(%rsp),%r12
+.cfi_restore %r12
+ mov 32(%rsp),%rbx
+.cfi_restore %rbx
+ mov 40(%rsp),%rbp
+.cfi_restore %rbp
+ lea 48(%rsp),%rsp
+.cfi_adjust_cfa_offset -48
+.Lsqr_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_sqr_mont,.-ecp_nistz256_sqr_mont
.type __ecp_nistz256_sqr_montq,\@abi-omnipotent
@@ -1278,8 +2443,12 @@ $code.=<<___;
.type ecp_nistz256_from_mont,\@function,2
.align 32
ecp_nistz256_from_mont:
+.cfi_startproc
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
+.Lfrom_body:
mov 8*0($in_ptr), %rax
mov .Lpoly+8*3(%rip), $t2
@@ -1360,9 +2529,15 @@ ecp_nistz256_from_mont:
mov $acc2, 8*2($r_ptr)
mov $acc3, 8*3($r_ptr)
- pop %r13
- pop %r12
+ mov 0(%rsp),%r13
+.cfi_restore %r13
+ mov 8(%rsp),%r12
+.cfi_restore %r12
+ lea 16(%rsp),%rsp
+.cfi_adjust_cfa_offset -16
+.Lfrom_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_from_mont,.-ecp_nistz256_from_mont
___
}
@@ -1488,10 +2663,10 @@ $code.=<<___ if ($win64);
movaps 0x80(%rsp), %xmm14
movaps 0x90(%rsp), %xmm15
lea 0xa8(%rsp), %rsp
-.LSEH_end_ecp_nistz256_gather_w5:
___
$code.=<<___;
ret
+.LSEH_end_ecp_nistz256_gather_w5:
.size ecp_nistz256_gather_w5,.-ecp_nistz256_gather_w5
################################################################################
@@ -1593,10 +2768,10 @@ $code.=<<___ if ($win64);
movaps 0x80(%rsp), %xmm14
movaps 0x90(%rsp), %xmm15
lea 0xa8(%rsp), %rsp
-.LSEH_end_ecp_nistz256_gather_w7:
___
$code.=<<___;
ret
+.LSEH_end_ecp_nistz256_gather_w7:
.size ecp_nistz256_gather_w7,.-ecp_nistz256_gather_w7
___
}
@@ -1617,18 +2792,19 @@ ecp_nistz256_avx2_gather_w5:
___
$code.=<<___ if ($win64);
lea -0x88(%rsp), %rax
+ mov %rsp,%r11
.LSEH_begin_ecp_nistz256_avx2_gather_w5:
- .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
- .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
- .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
- .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 8(%rax)
- .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
- .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
- .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
- .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
- .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
- .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
- .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
+ .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
+ .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6, -0x20(%rax)
+ .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7, -0x10(%rax)
+ .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8, 8(%rax)
+ .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9, 0x10(%rax)
+ .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10, 0x20(%rax)
+ .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11, 0x30(%rax)
+ .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12, 0x40(%rax)
+ .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13, 0x50(%rax)
+ .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14, 0x60(%rax)
+ .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15, 0x70(%rax)
___
$code.=<<___;
vmovdqa .LTwo(%rip), $TWO
@@ -1694,11 +2870,11 @@ $code.=<<___ if ($win64);
movaps 0x70(%rsp), %xmm13
movaps 0x80(%rsp), %xmm14
movaps 0x90(%rsp), %xmm15
- lea 0xa8(%rsp), %rsp
-.LSEH_end_ecp_nistz256_avx2_gather_w5:
+ lea (%r11), %rsp
___
$code.=<<___;
ret
+.LSEH_end_ecp_nistz256_avx2_gather_w5:
.size ecp_nistz256_avx2_gather_w5,.-ecp_nistz256_avx2_gather_w5
___
}
@@ -1721,19 +2897,20 @@ ecp_nistz256_avx2_gather_w7:
vzeroupper
___
$code.=<<___ if ($win64);
+ mov %rsp,%r11
lea -0x88(%rsp), %rax
.LSEH_begin_ecp_nistz256_avx2_gather_w7:
- .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax), %rsp
- .byte 0xc5,0xf8,0x29,0x70,0xe0 #vmovaps %xmm6, -0x20(%rax)
- .byte 0xc5,0xf8,0x29,0x78,0xf0 #vmovaps %xmm7, -0x10(%rax)
- .byte 0xc5,0x78,0x29,0x40,0x00 #vmovaps %xmm8, 8(%rax)
- .byte 0xc5,0x78,0x29,0x48,0x10 #vmovaps %xmm9, 0x10(%rax)
- .byte 0xc5,0x78,0x29,0x50,0x20 #vmovaps %xmm10, 0x20(%rax)
- .byte 0xc5,0x78,0x29,0x58,0x30 #vmovaps %xmm11, 0x30(%rax)
- .byte 0xc5,0x78,0x29,0x60,0x40 #vmovaps %xmm12, 0x40(%rax)
- .byte 0xc5,0x78,0x29,0x68,0x50 #vmovaps %xmm13, 0x50(%rax)
- .byte 0xc5,0x78,0x29,0x70,0x60 #vmovaps %xmm14, 0x60(%rax)
- .byte 0xc5,0x78,0x29,0x78,0x70 #vmovaps %xmm15, 0x70(%rax)
+ .byte 0x48,0x8d,0x60,0xe0 # lea -0x20(%rax), %rsp
+ .byte 0xc5,0xf8,0x29,0x70,0xe0 # vmovaps %xmm6, -0x20(%rax)
+ .byte 0xc5,0xf8,0x29,0x78,0xf0 # vmovaps %xmm7, -0x10(%rax)
+ .byte 0xc5,0x78,0x29,0x40,0x00 # vmovaps %xmm8, 8(%rax)
+ .byte 0xc5,0x78,0x29,0x48,0x10 # vmovaps %xmm9, 0x10(%rax)
+ .byte 0xc5,0x78,0x29,0x50,0x20 # vmovaps %xmm10, 0x20(%rax)
+ .byte 0xc5,0x78,0x29,0x58,0x30 # vmovaps %xmm11, 0x30(%rax)
+ .byte 0xc5,0x78,0x29,0x60,0x40 # vmovaps %xmm12, 0x40(%rax)
+ .byte 0xc5,0x78,0x29,0x68,0x50 # vmovaps %xmm13, 0x50(%rax)
+ .byte 0xc5,0x78,0x29,0x70,0x60 # vmovaps %xmm14, 0x60(%rax)
+ .byte 0xc5,0x78,0x29,0x78,0x70 # vmovaps %xmm15, 0x70(%rax)
___
$code.=<<___;
vmovdqa .LThree(%rip), $THREE
@@ -1814,11 +2991,11 @@ $code.=<<___ if ($win64);
movaps 0x70(%rsp), %xmm13
movaps 0x80(%rsp), %xmm14
movaps 0x90(%rsp), %xmm15
- lea 0xa8(%rsp), %rsp
-.LSEH_end_ecp_nistz256_avx2_gather_w7:
+ lea (%r11), %rsp
___
$code.=<<___;
ret
+.LSEH_end_ecp_nistz256_avx2_gather_w7:
.size ecp_nistz256_avx2_gather_w7,.-ecp_nistz256_avx2_gather_w7
___
} else {
@@ -2022,6 +3199,7 @@ $code.=<<___;
.type ecp_nistz256_point_double,\@function,2
.align 32
ecp_nistz256_point_double:
+.cfi_startproc
___
$code.=<<___ if ($addx);
mov \$0x80100, %ecx
@@ -2038,17 +3216,26 @@ $code.=<<___;
.type ecp_nistz256_point_doublex,\@function,2
.align 32
ecp_nistz256_point_doublex:
+.cfi_startproc
.Lpoint_doublex:
___
}
$code.=<<___;
push %rbp
+.cfi_push %rbp
push %rbx
+.cfi_push %rbx
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
sub \$32*5+8, %rsp
+.cfi_adjust_cfa_offset 32*5+8
+.Lpoint_double${x}_body:
.Lpoint_double_shortcut$x:
movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr.x
@@ -2114,7 +3301,7 @@ $code.=<<___;
movq %xmm1, $r_ptr
call __ecp_nistz256_sqr_mont$x # p256_sqr_mont(res_y, S);
___
-{
+{
######## ecp_nistz256_div_by_2(res_y, res_y); ##########################
# operate in 4-5-6-7 "name space" that matches squaring output
#
@@ -2203,7 +3390,7 @@ $code.=<<___;
lea $M(%rsp), $b_ptr
mov $acc4, $acc6 # harmonize sub output and mul input
xor %ecx, %ecx
- mov $acc4, $S+8*0(%rsp) # have to save:-(
+ mov $acc4, $S+8*0(%rsp) # have to save:-(
mov $acc5, $acc2
mov $acc5, $S+8*1(%rsp)
cmovz $acc0, $acc3
@@ -2219,14 +3406,25 @@ $code.=<<___;
movq %xmm1, $r_ptr
call __ecp_nistz256_sub_from$x # p256_sub(res_y, S, res_y);
- add \$32*5+8, %rsp
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbx
- pop %rbp
+ lea 32*5+56(%rsp), %rsi
+.cfi_def_cfa %rsi,8
+ mov -48(%rsi),%r15
+.cfi_restore %r15
+ mov -40(%rsi),%r14
+.cfi_restore %r14
+ mov -32(%rsi),%r13
+.cfi_restore %r13
+ mov -24(%rsi),%r12
+.cfi_restore %r12
+ mov -16(%rsi),%rbx
+.cfi_restore %rbx
+ mov -8(%rsi),%rbp
+.cfi_restore %rbp
+ lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
+.Lpoint_double${x}_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_point_double$sfx,.-ecp_nistz256_point_double$sfx
___
}
@@ -2252,6 +3450,7 @@ $code.=<<___;
.type ecp_nistz256_point_add,\@function,3
.align 32
ecp_nistz256_point_add:
+.cfi_startproc
___
$code.=<<___ if ($addx);
mov \$0x80100, %ecx
@@ -2268,17 +3467,26 @@ $code.=<<___;
.type ecp_nistz256_point_addx,\@function,3
.align 32
ecp_nistz256_point_addx:
+.cfi_startproc
.Lpoint_addx:
___
}
$code.=<<___;
push %rbp
+.cfi_push %rbp
push %rbx
+.cfi_push %rbx
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
sub \$32*18+8, %rsp
+.cfi_adjust_cfa_offset 32*18+8
+.Lpoint_add${x}_body:
movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
movdqu 0x10($a_ptr), %xmm1
@@ -2587,14 +3795,25 @@ $code.=<<___;
movdqu %xmm3, 0x30($r_ptr)
.Ladd_done$x:
- add \$32*18+8, %rsp
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbx
- pop %rbp
+ lea 32*18+56(%rsp), %rsi
+.cfi_def_cfa %rsi,8
+ mov -48(%rsi),%r15
+.cfi_restore %r15
+ mov -40(%rsi),%r14
+.cfi_restore %r14
+ mov -32(%rsi),%r13
+.cfi_restore %r13
+ mov -24(%rsi),%r12
+.cfi_restore %r12
+ mov -16(%rsi),%rbx
+.cfi_restore %rbx
+ mov -8(%rsi),%rbp
+.cfi_restore %rbp
+ lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
+.Lpoint_add${x}_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_point_add$sfx,.-ecp_nistz256_point_add$sfx
___
}
@@ -2619,6 +3838,7 @@ $code.=<<___;
.type ecp_nistz256_point_add_affine,\@function,3
.align 32
ecp_nistz256_point_add_affine:
+.cfi_startproc
___
$code.=<<___ if ($addx);
mov \$0x80100, %ecx
@@ -2635,17 +3855,26 @@ $code.=<<___;
.type ecp_nistz256_point_add_affinex,\@function,3
.align 32
ecp_nistz256_point_add_affinex:
+.cfi_startproc
.Lpoint_add_affinex:
___
}
$code.=<<___;
push %rbp
+.cfi_push %rbp
push %rbx
+.cfi_push %rbx
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
push %r14
+.cfi_push %r14
push %r15
+.cfi_push %r15
sub \$32*15+8, %rsp
+.cfi_adjust_cfa_offset 32*15+8
+.Ladd_affine${x}_body:
movdqu 0x00($a_ptr), %xmm0 # copy *(P256_POINT *)$a_ptr
mov $b_org, $b_ptr # reassign
@@ -2890,14 +4119,25 @@ $code.=<<___;
movdqu %xmm2, 0x20($r_ptr)
movdqu %xmm3, 0x30($r_ptr)
- add \$32*15+8, %rsp
- pop %r15
- pop %r14
- pop %r13
- pop %r12
- pop %rbx
- pop %rbp
+ lea 32*15+56(%rsp), %rsi
+.cfi_def_cfa %rsi,8
+ mov -48(%rsi),%r15
+.cfi_restore %r15
+ mov -40(%rsi),%r14
+.cfi_restore %r14
+ mov -32(%rsi),%r13
+.cfi_restore %r13
+ mov -24(%rsi),%r12
+.cfi_restore %r12
+ mov -16(%rsi),%rbx
+.cfi_restore %rbx
+ mov -8(%rsi),%rbp
+.cfi_restore %rbp
+ lea (%rsi),%rsp
+.cfi_def_cfa_register %rsp
+.Ladd_affine${x}_epilogue:
ret
+.cfi_endproc
.size ecp_nistz256_point_add_affine$sfx,.-ecp_nistz256_point_add_affine$sfx
___
}
@@ -3048,11 +4288,395 @@ ___
}
}}}
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+
+.type short_handler,\@abi-omnipotent
+.align 16
+short_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ lea 16(%rax),%rax
+
+ mov -8(%rax),%r12
+ mov -16(%rax),%r13
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+
+ jmp .Lcommon_seh_tail
+.size short_handler,.-short_handler
+
+.type full_handler,\@abi-omnipotent
+.align 16
+full_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 8(%r11),%r10d # HandlerData[2]
+ lea (%rax,%r10),%rax
+
+ mov -8(%rax),%rbp
+ mov -16(%rax),%rbx
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size full_handler,.-full_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_ecp_nistz256_mul_by_2
+ .rva .LSEH_end_ecp_nistz256_mul_by_2
+ .rva .LSEH_info_ecp_nistz256_mul_by_2
+
+ .rva .LSEH_begin_ecp_nistz256_div_by_2
+ .rva .LSEH_end_ecp_nistz256_div_by_2
+ .rva .LSEH_info_ecp_nistz256_div_by_2
+
+ .rva .LSEH_begin_ecp_nistz256_mul_by_3
+ .rva .LSEH_end_ecp_nistz256_mul_by_3
+ .rva .LSEH_info_ecp_nistz256_mul_by_3
+
+ .rva .LSEH_begin_ecp_nistz256_add
+ .rva .LSEH_end_ecp_nistz256_add
+ .rva .LSEH_info_ecp_nistz256_add
+
+ .rva .LSEH_begin_ecp_nistz256_sub
+ .rva .LSEH_end_ecp_nistz256_sub
+ .rva .LSEH_info_ecp_nistz256_sub
+
+ .rva .LSEH_begin_ecp_nistz256_neg
+ .rva .LSEH_end_ecp_nistz256_neg
+ .rva .LSEH_info_ecp_nistz256_neg
+
+ .rva .LSEH_begin_ecp_nistz256_ord_mul_mont
+ .rva .LSEH_end_ecp_nistz256_ord_mul_mont
+ .rva .LSEH_info_ecp_nistz256_ord_mul_mont
+
+ .rva .LSEH_begin_ecp_nistz256_ord_sqr_mont
+ .rva .LSEH_end_ecp_nistz256_ord_sqr_mont
+ .rva .LSEH_info_ecp_nistz256_ord_sqr_mont
+___
+$code.=<<___ if ($addx);
+ .rva .LSEH_begin_ecp_nistz256_ord_mul_montx
+ .rva .LSEH_end_ecp_nistz256_ord_mul_montx
+ .rva .LSEH_info_ecp_nistz256_ord_mul_montx
+
+ .rva .LSEH_begin_ecp_nistz256_ord_sqr_montx
+ .rva .LSEH_end_ecp_nistz256_ord_sqr_montx
+ .rva .LSEH_info_ecp_nistz256_ord_sqr_montx
+___
+$code.=<<___;
+ .rva .LSEH_begin_ecp_nistz256_to_mont
+ .rva .LSEH_end_ecp_nistz256_to_mont
+ .rva .LSEH_info_ecp_nistz256_to_mont
+
+ .rva .LSEH_begin_ecp_nistz256_mul_mont
+ .rva .LSEH_end_ecp_nistz256_mul_mont
+ .rva .LSEH_info_ecp_nistz256_mul_mont
+
+ .rva .LSEH_begin_ecp_nistz256_sqr_mont
+ .rva .LSEH_end_ecp_nistz256_sqr_mont
+ .rva .LSEH_info_ecp_nistz256_sqr_mont
+
+ .rva .LSEH_begin_ecp_nistz256_from_mont
+ .rva .LSEH_end_ecp_nistz256_from_mont
+ .rva .LSEH_info_ecp_nistz256_from_mont
+
+ .rva .LSEH_begin_ecp_nistz256_gather_w5
+ .rva .LSEH_end_ecp_nistz256_gather_w5
+ .rva .LSEH_info_ecp_nistz256_gather_wX
+
+ .rva .LSEH_begin_ecp_nistz256_gather_w7
+ .rva .LSEH_end_ecp_nistz256_gather_w7
+ .rva .LSEH_info_ecp_nistz256_gather_wX
+___
+$code.=<<___ if ($avx>1);
+ .rva .LSEH_begin_ecp_nistz256_avx2_gather_w5
+ .rva .LSEH_end_ecp_nistz256_avx2_gather_w5
+ .rva .LSEH_info_ecp_nistz256_avx2_gather_wX
+
+ .rva .LSEH_begin_ecp_nistz256_avx2_gather_w7
+ .rva .LSEH_end_ecp_nistz256_avx2_gather_w7
+ .rva .LSEH_info_ecp_nistz256_avx2_gather_wX
+___
+$code.=<<___;
+ .rva .LSEH_begin_ecp_nistz256_point_double
+ .rva .LSEH_end_ecp_nistz256_point_double
+ .rva .LSEH_info_ecp_nistz256_point_double
+
+ .rva .LSEH_begin_ecp_nistz256_point_add
+ .rva .LSEH_end_ecp_nistz256_point_add
+ .rva .LSEH_info_ecp_nistz256_point_add
+
+ .rva .LSEH_begin_ecp_nistz256_point_add_affine
+ .rva .LSEH_end_ecp_nistz256_point_add_affine
+ .rva .LSEH_info_ecp_nistz256_point_add_affine
+___
+$code.=<<___ if ($addx);
+ .rva .LSEH_begin_ecp_nistz256_point_doublex
+ .rva .LSEH_end_ecp_nistz256_point_doublex
+ .rva .LSEH_info_ecp_nistz256_point_doublex
+
+ .rva .LSEH_begin_ecp_nistz256_point_addx
+ .rva .LSEH_end_ecp_nistz256_point_addx
+ .rva .LSEH_info_ecp_nistz256_point_addx
+
+ .rva .LSEH_begin_ecp_nistz256_point_add_affinex
+ .rva .LSEH_end_ecp_nistz256_point_add_affinex
+ .rva .LSEH_info_ecp_nistz256_point_add_affinex
+___
+$code.=<<___;
+
+.section .xdata
+.align 8
+.LSEH_info_ecp_nistz256_mul_by_2:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lmul_by_2_body,.Lmul_by_2_epilogue # HandlerData[]
+.LSEH_info_ecp_nistz256_div_by_2:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Ldiv_by_2_body,.Ldiv_by_2_epilogue # HandlerData[]
+.LSEH_info_ecp_nistz256_mul_by_3:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lmul_by_3_body,.Lmul_by_3_epilogue # HandlerData[]
+.LSEH_info_ecp_nistz256_add:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Ladd_body,.Ladd_epilogue # HandlerData[]
+.LSEH_info_ecp_nistz256_sub:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lsub_body,.Lsub_epilogue # HandlerData[]
+.LSEH_info_ecp_nistz256_neg:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lneg_body,.Lneg_epilogue # HandlerData[]
+.LSEH_info_ecp_nistz256_ord_mul_mont:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lord_mul_body,.Lord_mul_epilogue # HandlerData[]
+ .long 48,0
+.LSEH_info_ecp_nistz256_ord_sqr_mont:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lord_sqr_body,.Lord_sqr_epilogue # HandlerData[]
+ .long 48,0
+___
+$code.=<<___ if ($addx);
+.LSEH_info_ecp_nistz256_ord_mul_montx:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lord_mulx_body,.Lord_mulx_epilogue # HandlerData[]
+ .long 48,0
+.LSEH_info_ecp_nistz256_ord_sqr_montx:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lord_sqrx_body,.Lord_sqrx_epilogue # HandlerData[]
+ .long 48,0
+___
+$code.=<<___;
+.LSEH_info_ecp_nistz256_to_mont:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
+ .long 48,0
+.LSEH_info_ecp_nistz256_mul_mont:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lmul_body,.Lmul_epilogue # HandlerData[]
+ .long 48,0
+.LSEH_info_ecp_nistz256_sqr_mont:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lsqr_body,.Lsqr_epilogue # HandlerData[]
+ .long 48,0
+.LSEH_info_ecp_nistz256_from_mont:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lfrom_body,.Lfrom_epilogue # HandlerData[]
+.LSEH_info_ecp_nistz256_gather_wX:
+ .byte 0x01,0x33,0x16,0x00
+ .byte 0x33,0xf8,0x09,0x00 #movaps 0x90(rsp),xmm15
+ .byte 0x2e,0xe8,0x08,0x00 #movaps 0x80(rsp),xmm14
+ .byte 0x29,0xd8,0x07,0x00 #movaps 0x70(rsp),xmm13
+ .byte 0x24,0xc8,0x06,0x00 #movaps 0x60(rsp),xmm12
+ .byte 0x1f,0xb8,0x05,0x00 #movaps 0x50(rsp),xmm11
+ .byte 0x1a,0xa8,0x04,0x00 #movaps 0x40(rsp),xmm10
+ .byte 0x15,0x98,0x03,0x00 #movaps 0x30(rsp),xmm9
+ .byte 0x10,0x88,0x02,0x00 #movaps 0x20(rsp),xmm8
+ .byte 0x0c,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
+ .byte 0x08,0x68,0x00,0x00 #movaps 0x00(rsp),xmm6
+ .byte 0x04,0x01,0x15,0x00 #sub rsp,0xa8
+ .align 8
+___
+$code.=<<___ if ($avx>1);
+.LSEH_info_ecp_nistz256_avx2_gather_wX:
+ .byte 0x01,0x36,0x17,0x0b
+ .byte 0x36,0xf8,0x09,0x00 # vmovaps 0x90(rsp),xmm15
+ .byte 0x31,0xe8,0x08,0x00 # vmovaps 0x80(rsp),xmm14
+ .byte 0x2c,0xd8,0x07,0x00 # vmovaps 0x70(rsp),xmm13
+ .byte 0x27,0xc8,0x06,0x00 # vmovaps 0x60(rsp),xmm12
+ .byte 0x22,0xb8,0x05,0x00 # vmovaps 0x50(rsp),xmm11
+ .byte 0x1d,0xa8,0x04,0x00 # vmovaps 0x40(rsp),xmm10
+ .byte 0x18,0x98,0x03,0x00 # vmovaps 0x30(rsp),xmm9
+ .byte 0x13,0x88,0x02,0x00 # vmovaps 0x20(rsp),xmm8
+ .byte 0x0e,0x78,0x01,0x00 # vmovaps 0x10(rsp),xmm7
+ .byte 0x09,0x68,0x00,0x00 # vmovaps 0x00(rsp),xmm6
+ .byte 0x04,0x01,0x15,0x00 # sub rsp,0xa8
+ .byte 0x00,0xb3,0x00,0x00 # set_frame r11
+ .align 8
+___
+$code.=<<___;
+.LSEH_info_ecp_nistz256_point_double:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lpoint_doubleq_body,.Lpoint_doubleq_epilogue # HandlerData[]
+ .long 32*5+56,0
+.LSEH_info_ecp_nistz256_point_add:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lpoint_addq_body,.Lpoint_addq_epilogue # HandlerData[]
+ .long 32*18+56,0
+.LSEH_info_ecp_nistz256_point_add_affine:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Ladd_affineq_body,.Ladd_affineq_epilogue # HandlerData[]
+ .long 32*15+56,0
+___
+$code.=<<___ if ($addx);
+.align 8
+.LSEH_info_ecp_nistz256_point_doublex:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lpoint_doublex_body,.Lpoint_doublex_epilogue # HandlerData[]
+ .long 32*5+56,0
+.LSEH_info_ecp_nistz256_point_addx:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lpoint_addx_body,.Lpoint_addx_epilogue # HandlerData[]
+ .long 32*18+56,0
+.LSEH_info_ecp_nistz256_point_add_affinex:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Ladd_affinex_body,.Ladd_affinex_epilogue # HandlerData[]
+ .long 32*15+56,0
+___
+}
+
########################################################################
# Convert ecp_nistz256_table.c to layout expected by ecp_nistz_gather_w7
#
-open TABLE,"<ecp_nistz256_table.c" or
-open TABLE,"<${dir}../ecp_nistz256_table.c" or
+open TABLE,"<ecp_nistz256_table.c" or
+open TABLE,"<${dir}../ecp_nistz256_table.c" or
die "failed to open ecp_nistz256_table.c:",$!;
use integer;
diff --git a/deps/openssl/openssl/crypto/ec/asm/x25519-ppc64.pl b/deps/openssl/openssl/crypto/ec/asm/x25519-ppc64.pl
new file mode 100755
index 0000000000..3773cb27cd
--- /dev/null
+++ b/deps/openssl/openssl/crypto/ec/asm/x25519-ppc64.pl
@@ -0,0 +1,824 @@
+#! /usr/bin/env perl
+# Copyright 2018 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# X25519 lower-level primitives for PPC64.
+#
+# July 2018.
+#
+# Base 2^64 is faster than base 2^51 on pre-POWER8, most notably ~15%
+# faster on PPC970/G5. POWER8, on the other hand, seems to trip on its
+# own shoelaces when handling longer carry chains. As base 2^51 has
+# just single-carry pairs, it's 25% faster than base 2^64 there. Since
+# PPC970 is pretty old, the base 2^64 implementation is not engaged.
+# Comparison to compiler-generated code is complicated by the fact
+# that not all compilers support 128-bit integers. When the compiler
+# doesn't, like xlc, this module delivers more than 2x improvement,
+# and when it does, improvements from 12% to 30% were measured...
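
To make the reduction strategy concrete: the (currently disabled) base 2^64 code below, like the x86_64 module, reduces the 512-bit product by folding its upper half back in multiplied by 38, because 2^256 = 38 mod 2^255-19. A rough Python model of that arithmetic, with helper names of my own and plain integers standing in for the 4x64-bit limb vectors:

    # A model only: ordinary Python integers stand in for the 4x64-bit limb vectors.
    import random

    P = 2**255 - 19

    def fold38(t):
        # fold bits 256 and up back in times 38, since 2^256 = 38 (mod P)
        while t >> 256:
            t = (t & (2**256 - 1)) + 38 * (t >> 256)
        return t

    def fe64_mul_model(a, b):
        # what the base 2^64 multiply computes, up to the choice of a
        # (non-canonical) 256-bit representative of a*b mod P
        return fold38(a * b)

    # sanity check against plain modular arithmetic
    a, b = random.getrandbits(255), random.getrandbits(255)
    assert fe64_mul_model(a, b) % P == (a * b) % P

The assembly performs the same folding with a fixed number of passes plus a final conditional fix-up rather than a loop.
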
+
+$flavour = shift;
+while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {}
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}ppc-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/ppc-xlate.pl" and -f $xlate) or
+die "can't locate ppc-xlate.pl";
+
+open OUT,"| \"$^X\" $xlate $flavour $output";
+*STDOUT=*OUT;
+
+my $sp = "r1";
+my ($rp,$ap,$bp) = map("r$_",3..5);
+
+####################################################### base 2^64
+if (0) {
+my ($bi,$a0,$a1,$a2,$a3,$t0,$t1, $t2,$t3,
+ $acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7) =
+ map("r$_",(6..12,22..31));
+my $zero = "r0";
+my $FRAME = 16*8;
+
+$code.=<<___;
+.text
+
+.globl x25519_fe64_mul
+.type x25519_fe64_mul,\@function
+.align 5
+x25519_fe64_mul:
+ stdu $sp,-$FRAME($sp)
+ std r22,`$FRAME-8*10`($sp)
+ std r23,`$FRAME-8*9`($sp)
+ std r24,`$FRAME-8*8`($sp)
+ std r25,`$FRAME-8*7`($sp)
+ std r26,`$FRAME-8*6`($sp)
+ std r27,`$FRAME-8*5`($sp)
+ std r28,`$FRAME-8*4`($sp)
+ std r29,`$FRAME-8*3`($sp)
+ std r30,`$FRAME-8*2`($sp)
+ std r31,`$FRAME-8*1`($sp)
+
+ ld $bi,0($bp)
+ ld $a0,0($ap)
+ xor $zero,$zero,$zero
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+
+ mulld $acc0,$a0,$bi # a[0]*b[0]
+ mulhdu $t0,$a0,$bi
+ mulld $acc1,$a1,$bi # a[1]*b[0]
+ mulhdu $t1,$a1,$bi
+ mulld $acc2,$a2,$bi # a[2]*b[0]
+ mulhdu $t2,$a2,$bi
+ mulld $acc3,$a3,$bi # a[3]*b[0]
+ mulhdu $t3,$a3,$bi
+___
+for(my @acc=($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7),
+ my $i=1; $i<4; shift(@acc), $i++) {
+my $acc4 = $i==1? $zero : @acc[4];
+
+$code.=<<___;
+ ld $bi,`8*$i`($bp)
+ addc @acc[1],@acc[1],$t0 # accumulate high parts
+ mulld $t0,$a0,$bi
+ adde @acc[2],@acc[2],$t1
+ mulld $t1,$a1,$bi
+ adde @acc[3],@acc[3],$t2
+ mulld $t2,$a2,$bi
+ adde @acc[4],$acc4,$t3
+ mulld $t3,$a3,$bi
+ addc @acc[1],@acc[1],$t0 # accumulate low parts
+ mulhdu $t0,$a0,$bi
+ adde @acc[2],@acc[2],$t1
+ mulhdu $t1,$a1,$bi
+ adde @acc[3],@acc[3],$t2
+ mulhdu $t2,$a2,$bi
+ adde @acc[4],@acc[4],$t3
+ mulhdu $t3,$a3,$bi
+ adde @acc[5],$zero,$zero
+___
+}
+$code.=<<___;
+ li $bi,38
+ addc $acc4,$acc4,$t0
+ mulld $t0,$acc4,$bi
+ adde $acc5,$acc5,$t1
+ mulld $t1,$acc5,$bi
+ adde $acc6,$acc6,$t2
+ mulld $t2,$acc6,$bi
+ adde $acc7,$acc7,$t3
+ mulld $t3,$acc7,$bi
+
+ addc $acc0,$acc0,$t0
+ mulhdu $t0,$acc4,$bi
+ adde $acc1,$acc1,$t1
+ mulhdu $t1,$acc5,$bi
+ adde $acc2,$acc2,$t2
+ mulhdu $t2,$acc6,$bi
+ adde $acc3,$acc3,$t3
+ mulhdu $t3,$acc7,$bi
+ adde $acc4,$zero,$zero
+
+ addc $acc1,$acc1,$t0
+ adde $acc2,$acc2,$t1
+ adde $acc3,$acc3,$t2
+ adde $acc4,$acc4,$t3
+
+ mulld $acc4,$acc4,$bi
+
+ addc $acc0,$acc0,$acc4
+ addze $acc1,$acc1
+ addze $acc2,$acc2
+ addze $acc3,$acc3
+
+ subfe $acc4,$acc4,$acc4 # carry -> ~mask
+ std $acc1,8($rp)
+ andc $acc4,$bi,$acc4
+ std $acc2,16($rp)
+ add $acc0,$acc0,$acc4
+ std $acc3,24($rp)
+ std $acc0,0($rp)
+
+ ld r22,`$FRAME-8*10`($sp)
+ ld r23,`$FRAME-8*9`($sp)
+ ld r24,`$FRAME-8*8`($sp)
+ ld r25,`$FRAME-8*7`($sp)
+ ld r26,`$FRAME-8*6`($sp)
+ ld r27,`$FRAME-8*5`($sp)
+ ld r28,`$FRAME-8*4`($sp)
+ ld r29,`$FRAME-8*3`($sp)
+ ld r30,`$FRAME-8*2`($sp)
+ ld r31,`$FRAME-8*1`($sp)
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,10,3,0
+ .long 0
+.size x25519_fe64_mul,.-x25519_fe64_mul
+
+.globl x25519_fe64_sqr
+.type x25519_fe64_sqr,\@function
+.align 5
+x25519_fe64_sqr:
+ stdu $sp,-$FRAME($sp)
+ std r22,`$FRAME-8*10`($sp)
+ std r23,`$FRAME-8*9`($sp)
+ std r24,`$FRAME-8*8`($sp)
+ std r25,`$FRAME-8*7`($sp)
+ std r26,`$FRAME-8*6`($sp)
+ std r27,`$FRAME-8*5`($sp)
+ std r28,`$FRAME-8*4`($sp)
+ std r29,`$FRAME-8*3`($sp)
+ std r30,`$FRAME-8*2`($sp)
+ std r31,`$FRAME-8*1`($sp)
+
+ ld $a0,0($ap)
+ xor $zero,$zero,$zero
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+
+ ################################
+ # | | | | | |a1*a0| |
+ # | | | | |a2*a0| | |
+ # | |a3*a2|a3*a0| | | |
+ # | | | |a2*a1| | | |
+ # | | |a3*a1| | | | |
+ # *| | | | | | | | 2|
+ # +|a3*a3|a2*a2|a1*a1|a0*a0|
+ # |--+--+--+--+--+--+--+--|
+ # |A7|A6|A5|A4|A3|A2|A1|A0|, where Ax is $accx, i.e. follow $accx
+ #
+	# "can't overflow" below marks carries into the high part of a
+	# multiplication result, which can't overflow because the high
+	# part can never be all ones.
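+	# (for 64-bit inputs the high part of a product is at most
+	# (2^64-1)^2 >> 64 = 2^64-2, so adding a one-bit carry can't wrap)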
+
+ mulld $acc1,$a1,$a0 # a[1]*a[0]
+ mulhdu $t1,$a1,$a0
+ mulld $acc2,$a2,$a0 # a[2]*a[0]
+ mulhdu $t2,$a2,$a0
+ mulld $acc3,$a3,$a0 # a[3]*a[0]
+ mulhdu $acc4,$a3,$a0
+
+ addc $acc2,$acc2,$t1 # accumulate high parts of multiplication
+ mulld $t0,$a2,$a1 # a[2]*a[1]
+ mulhdu $t1,$a2,$a1
+ adde $acc3,$acc3,$t2
+ mulld $t2,$a3,$a1 # a[3]*a[1]
+ mulhdu $t3,$a3,$a1
+ addze $acc4,$acc4 # can't overflow
+
+ mulld $acc5,$a3,$a2 # a[3]*a[2]
+ mulhdu $acc6,$a3,$a2
+
+ addc $t1,$t1,$t2 # accumulate high parts of multiplication
+ mulld $acc0,$a0,$a0 # a[0]*a[0]
+ addze $t2,$t3 # can't overflow
+
+ addc $acc3,$acc3,$t0 # accumulate low parts of multiplication
+ mulhdu $a0,$a0,$a0
+ adde $acc4,$acc4,$t1
+ mulld $t1,$a1,$a1 # a[1]*a[1]
+ adde $acc5,$acc5,$t2
+ mulhdu $a1,$a1,$a1
+ addze $acc6,$acc6 # can't overflow
+
+ addc $acc1,$acc1,$acc1 # acc[1-6]*=2
+ mulld $t2,$a2,$a2 # a[2]*a[2]
+ adde $acc2,$acc2,$acc2
+ mulhdu $a2,$a2,$a2
+ adde $acc3,$acc3,$acc3
+ mulld $t3,$a3,$a3 # a[3]*a[3]
+ adde $acc4,$acc4,$acc4
+ mulhdu $a3,$a3,$a3
+ adde $acc5,$acc5,$acc5
+ adde $acc6,$acc6,$acc6
+ addze $acc7,$zero
+
+ addc $acc1,$acc1,$a0 # +a[i]*a[i]
+ li $bi,38
+ adde $acc2,$acc2,$t1
+ adde $acc3,$acc3,$a1
+ adde $acc4,$acc4,$t2
+ adde $acc5,$acc5,$a2
+ adde $acc6,$acc6,$t3
+ adde $acc7,$acc7,$a3
+
+ mulld $t0,$acc4,$bi
+ mulld $t1,$acc5,$bi
+ mulld $t2,$acc6,$bi
+ mulld $t3,$acc7,$bi
+
+ addc $acc0,$acc0,$t0
+ mulhdu $t0,$acc4,$bi
+ adde $acc1,$acc1,$t1
+ mulhdu $t1,$acc5,$bi
+ adde $acc2,$acc2,$t2
+ mulhdu $t2,$acc6,$bi
+ adde $acc3,$acc3,$t3
+ mulhdu $t3,$acc7,$bi
+ addze $acc4,$zero
+
+ addc $acc1,$acc1,$t0
+ adde $acc2,$acc2,$t1
+ adde $acc3,$acc3,$t2
+ adde $acc4,$acc4,$t3
+
+ mulld $acc4,$acc4,$bi
+
+ addc $acc0,$acc0,$acc4
+ addze $acc1,$acc1
+ addze $acc2,$acc2
+ addze $acc3,$acc3
+
+ subfe $acc4,$acc4,$acc4 # carry -> ~mask
+ std $acc1,8($rp)
+ andc $acc4,$bi,$acc4
+ std $acc2,16($rp)
+ add $acc0,$acc0,$acc4
+ std $acc3,24($rp)
+ std $acc0,0($rp)
+
+ ld r22,`$FRAME-8*10`($sp)
+ ld r23,`$FRAME-8*9`($sp)
+ ld r24,`$FRAME-8*8`($sp)
+ ld r25,`$FRAME-8*7`($sp)
+ ld r26,`$FRAME-8*6`($sp)
+ ld r27,`$FRAME-8*5`($sp)
+ ld r28,`$FRAME-8*4`($sp)
+ ld r29,`$FRAME-8*3`($sp)
+ ld r30,`$FRAME-8*2`($sp)
+ ld r31,`$FRAME-8*1`($sp)
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,10,2,0
+ .long 0
+.size x25519_fe64_sqr,.-x25519_fe64_sqr
+
+.globl x25519_fe64_mul121666
+.type x25519_fe64_mul121666,\@function
+.align 5
+x25519_fe64_mul121666:
+ lis $bi,`65536>>16`
+ ori $bi,$bi,`121666-65536`
+
+ ld $t0,0($ap)
+ ld $t1,8($ap)
+ ld $bp,16($ap)
+ ld $ap,24($ap)
+
+ mulld $a0,$t0,$bi
+ mulhdu $t0,$t0,$bi
+ mulld $a1,$t1,$bi
+ mulhdu $t1,$t1,$bi
+ mulld $a2,$bp,$bi
+ mulhdu $bp,$bp,$bi
+ mulld $a3,$ap,$bi
+ mulhdu $ap,$ap,$bi
+
+ addc $a1,$a1,$t0
+ adde $a2,$a2,$t1
+ adde $a3,$a3,$bp
+ addze $ap, $ap
+
+ mulli $ap,$ap,38
+
+ addc $a0,$a0,$ap
+ addze $a1,$a1
+ addze $a2,$a2
+ addze $a3,$a3
+
+ subfe $t1,$t1,$t1 # carry -> ~mask
+ std $a1,8($rp)
+ andc $t0,$t0,$t1
+ std $a2,16($rp)
+ add $a0,$a0,$t0
+ std $a3,24($rp)
+ std $a0,0($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size x25519_fe64_mul121666,.-x25519_fe64_mul121666
+
+.globl x25519_fe64_add
+.type x25519_fe64_add,\@function
+.align 5
+x25519_fe64_add:
+ ld $a0,0($ap)
+ ld $t0,0($bp)
+ ld $a1,8($ap)
+ ld $t1,8($bp)
+ ld $a2,16($ap)
+ ld $bi,16($bp)
+ ld $a3,24($ap)
+ ld $bp,24($bp)
+
+ addc $a0,$a0,$t0
+ adde $a1,$a1,$t1
+ adde $a2,$a2,$bi
+ adde $a3,$a3,$bp
+
+ li $t0,38
+ subfe $t1,$t1,$t1 # carry -> ~mask
+ andc $t1,$t0,$t1
+
+ addc $a0,$a0,$t1
+ addze $a1,$a1
+ addze $a2,$a2
+ addze $a3,$a3
+
+ subfe $t1,$t1,$t1 # carry -> ~mask
+ std $a1,8($rp)
+ andc $t0,$t0,$t1
+ std $a2,16($rp)
+ add $a0,$a0,$t0
+ std $a3,24($rp)
+ std $a0,0($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size x25519_fe64_add,.-x25519_fe64_add
+
+.globl x25519_fe64_sub
+.type x25519_fe64_sub,\@function
+.align 5
+x25519_fe64_sub:
+ ld $a0,0($ap)
+ ld $t0,0($bp)
+ ld $a1,8($ap)
+ ld $t1,8($bp)
+ ld $a2,16($ap)
+ ld $bi,16($bp)
+ ld $a3,24($ap)
+ ld $bp,24($bp)
+
+ subfc $a0,$t0,$a0
+ subfe $a1,$t1,$a1
+ subfe $a2,$bi,$a2
+ subfe $a3,$bp,$a3
+
+ li $t0,38
+ subfe $t1,$t1,$t1 # borrow -> mask
+ xor $zero,$zero,$zero
+ and $t1,$t0,$t1
+
+ subfc $a0,$t1,$a0
+ subfe $a1,$zero,$a1
+ subfe $a2,$zero,$a2
+ subfe $a3,$zero,$a3
+
+ subfe $t1,$t1,$t1 # borrow -> mask
+ std $a1,8($rp)
+ and $t0,$t0,$t1
+ std $a2,16($rp)
+ subf $a0,$t0,$a0
+ std $a3,24($rp)
+ std $a0,0($rp)
+
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,3,0
+ .long 0
+.size x25519_fe64_sub,.-x25519_fe64_sub
+
+.globl x25519_fe64_tobytes
+.type x25519_fe64_tobytes,\@function
+.align 5
+x25519_fe64_tobytes:
+ ld $a3,24($ap)
+ ld $a0,0($ap)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+
+ sradi $t0,$a3,63 # most significant bit -> mask
+ li $t1,19
+ and $t0,$t0,$t1
+ sldi $a3,$a3,1
+ add $t0,$t0,$t1 # compare to modulus in the same go
+	srdi	$a3,$a3,1		# most significant bit cleared
+
+ addc $a0,$a0,$t0
+ addze $a1,$a1
+ addze $a2,$a2
+ addze $a3,$a3
+
+ xor $zero,$zero,$zero
+ sradi $t0,$a3,63 # most significant bit -> mask
+ sldi $a3,$a3,1
+ andc $t0,$t1,$t0
+	srdi	$a3,$a3,1		# most significant bit cleared
+
+ subi $rp,$rp,1
+ subfc $a0,$t0,$a0
+ subfe $a1,$zero,$a1
+ subfe $a2,$zero,$a2
+ subfe $a3,$zero,$a3
+
+___
+for (my @a=($a0,$a1,$a2,$a3), my $i=0; $i<4; shift(@a), $i++) {
+$code.=<<___;
+ srdi $t0,@a[0],8
+ stbu @a[0],1($rp)
+ srdi @a[0],@a[0],16
+ stbu $t0,1($rp)
+ srdi $t0,@a[0],8
+ stbu @a[0],1($rp)
+ srdi @a[0],@a[0],16
+ stbu $t0,1($rp)
+ srdi $t0,@a[0],8
+ stbu @a[0],1($rp)
+ srdi @a[0],@a[0],16
+ stbu $t0,1($rp)
+ srdi $t0,@a[0],8
+ stbu @a[0],1($rp)
+ stbu $t0,1($rp)
+___
+}
+$code.=<<___;
+ blr
+ .long 0
+ .byte 0,12,0x14,0,0,0,2,0
+ .long 0
+.size x25519_fe64_tobytes,.-x25519_fe64_tobytes
+___
+}
+####################################################### base 2^51
+{
+my ($bi,$a0,$a1,$a2,$a3,$a4,$t0, $t1,
+ $h0lo,$h0hi,$h1lo,$h1hi,$h2lo,$h2hi,$h3lo,$h3hi,$h4lo,$h4hi) =
+ map("r$_",(6..12,21..31));
+my $mask = "r0";
+my $FRAME = 18*8;
+
+$code.=<<___;
+.text
+
+.globl x25519_fe51_mul
+.type x25519_fe51_mul,\@function
+.align 5
+x25519_fe51_mul:
+ stdu $sp,-$FRAME($sp)
+ std r21,`$FRAME-8*11`($sp)
+ std r22,`$FRAME-8*10`($sp)
+ std r23,`$FRAME-8*9`($sp)
+ std r24,`$FRAME-8*8`($sp)
+ std r25,`$FRAME-8*7`($sp)
+ std r26,`$FRAME-8*6`($sp)
+ std r27,`$FRAME-8*5`($sp)
+ std r28,`$FRAME-8*4`($sp)
+ std r29,`$FRAME-8*3`($sp)
+ std r30,`$FRAME-8*2`($sp)
+ std r31,`$FRAME-8*1`($sp)
+
+ ld $bi,0($bp)
+ ld $a0,0($ap)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+ ld $a4,32($ap)
+
+ mulld $h0lo,$a0,$bi # a[0]*b[0]
+ mulhdu $h0hi,$a0,$bi
+
+ mulld $h1lo,$a1,$bi # a[1]*b[0]
+ mulhdu $h1hi,$a1,$bi
+
+ mulld $h4lo,$a4,$bi # a[4]*b[0]
+ mulhdu $h4hi,$a4,$bi
+ ld $ap,8($bp)
+ mulli $a4,$a4,19
+
+ mulld $h2lo,$a2,$bi # a[2]*b[0]
+ mulhdu $h2hi,$a2,$bi
+
+ mulld $h3lo,$a3,$bi # a[3]*b[0]
+ mulhdu $h3hi,$a3,$bi
+___
+for(my @a=($a0,$a1,$a2,$a3,$a4),
+ my $i=1; $i<4; $i++) {
+ ($ap,$bi) = ($bi,$ap);
+$code.=<<___;
+ mulld $t0,@a[4],$bi
+ mulhdu $t1,@a[4],$bi
+ addc $h0lo,$h0lo,$t0
+ adde $h0hi,$h0hi,$t1
+
+ mulld $t0,@a[0],$bi
+ mulhdu $t1,@a[0],$bi
+ addc $h1lo,$h1lo,$t0
+ adde $h1hi,$h1hi,$t1
+
+ mulld $t0,@a[3],$bi
+ mulhdu $t1,@a[3],$bi
+ ld $ap,`8*($i+1)`($bp)
+ mulli @a[3],@a[3],19
+ addc $h4lo,$h4lo,$t0
+ adde $h4hi,$h4hi,$t1
+
+ mulld $t0,@a[1],$bi
+ mulhdu $t1,@a[1],$bi
+ addc $h2lo,$h2lo,$t0
+ adde $h2hi,$h2hi,$t1
+
+ mulld $t0,@a[2],$bi
+ mulhdu $t1,@a[2],$bi
+ addc $h3lo,$h3lo,$t0
+ adde $h3hi,$h3hi,$t1
+___
+ unshift(@a,pop(@a));
+}
+ ($ap,$bi) = ($bi,$ap);
+$code.=<<___;
+ mulld $t0,$a1,$bi
+ mulhdu $t1,$a1,$bi
+ addc $h0lo,$h0lo,$t0
+ adde $h0hi,$h0hi,$t1
+
+ mulld $t0,$a2,$bi
+ mulhdu $t1,$a2,$bi
+ addc $h1lo,$h1lo,$t0
+ adde $h1hi,$h1hi,$t1
+
+ mulld $t0,$a3,$bi
+ mulhdu $t1,$a3,$bi
+ addc $h2lo,$h2lo,$t0
+ adde $h2hi,$h2hi,$t1
+
+ mulld $t0,$a4,$bi
+ mulhdu $t1,$a4,$bi
+ addc $h3lo,$h3lo,$t0
+ adde $h3hi,$h3hi,$t1
+
+ mulld $t0,$a0,$bi
+ mulhdu $t1,$a0,$bi
+ addc $h4lo,$h4lo,$t0
+ adde $h4hi,$h4hi,$t1
+
+.Lfe51_reduce:
+ li $mask,-1
+ srdi $mask,$mask,13 # 0x7ffffffffffff
+
+ srdi $t0,$h2lo,51
+ and $a2,$h2lo,$mask
+ insrdi $t0,$h2hi,51,0 # h2>>51
+ srdi $t1,$h0lo,51
+ and $a0,$h0lo,$mask
+ insrdi $t1,$h0hi,51,0 # h0>>51
+ addc $h3lo,$h3lo,$t0
+ addze $h3hi,$h3hi
+ addc $h1lo,$h1lo,$t1
+ addze $h1hi,$h1hi
+
+ srdi $t0,$h3lo,51
+ and $a3,$h3lo,$mask
+ insrdi $t0,$h3hi,51,0 # h3>>51
+ srdi $t1,$h1lo,51
+ and $a1,$h1lo,$mask
+ insrdi $t1,$h1hi,51,0 # h1>>51
+ addc $h4lo,$h4lo,$t0
+ addze $h4hi,$h4hi
+ add $a2,$a2,$t1
+
+ srdi $t0,$h4lo,51
+ and $a4,$h4lo,$mask
+ insrdi $t0,$h4hi,51,0
+ mulli $t0,$t0,19 # (h4 >> 51) * 19
+
+ add $a0,$a0,$t0
+
+ srdi $t1,$a2,51
+ and $a2,$a2,$mask
+ add $a3,$a3,$t1
+
+ srdi $t0,$a0,51
+ and $a0,$a0,$mask
+ add $a1,$a1,$t0
+
+ std $a2,16($rp)
+ std $a3,24($rp)
+ std $a4,32($rp)
+ std $a0,0($rp)
+ std $a1,8($rp)
+
+ ld r21,`$FRAME-8*11`($sp)
+ ld r22,`$FRAME-8*10`($sp)
+ ld r23,`$FRAME-8*9`($sp)
+ ld r24,`$FRAME-8*8`($sp)
+ ld r25,`$FRAME-8*7`($sp)
+ ld r26,`$FRAME-8*6`($sp)
+ ld r27,`$FRAME-8*5`($sp)
+ ld r28,`$FRAME-8*4`($sp)
+ ld r29,`$FRAME-8*3`($sp)
+ ld r30,`$FRAME-8*2`($sp)
+ ld r31,`$FRAME-8*1`($sp)
+ addi $sp,$sp,$FRAME
+ blr
+ .long 0
+ .byte 0,12,4,0,0x80,11,3,0
+ .long 0
+.size x25519_fe51_mul,.-x25519_fe51_mul
+___
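
At heart, x25519_fe51_mul above is a 5x5 schoolbook multiplication in which partial products that would land at 2^255 or higher wrap around multiplied by 19; the mulli ...,19 pre-scaling of the upper a-limbs implements that wrap. A rough Python model (helper names are mine; limb-width bookkeeping is omitted):

    # A model only; helper names are mine, plain Python integers throughout.
    import random

    P = 2**255 - 19

    def fe51_value(limbs):
        # five little-endian limbs of (nominally) 51 bits each
        return sum(w << (51 * i) for i, w in enumerate(limbs))

    def fe51_mul_model(f, g):
        # 5x5 schoolbook multiply; partial products that would land at 2^255
        # or above wrap around multiplied by 19, since 2^255 = 19 (mod P).
        # The limbs returned represent the product but are not yet carried;
        # .Lfe51_reduce performs that carry chain.
        h = [0] * 5
        for i in range(5):
            for j in range(5):
                w = f[i] * g[j]
                if i + j >= 5:
                    h[i + j - 5] += 19 * w
                else:
                    h[i + j] += w
        return h

    f = [random.getrandbits(51) for _ in range(5)]
    g = [random.getrandbits(51) for _ in range(5)]
    assert fe51_value(fe51_mul_model(f, g)) % P == (fe51_value(f) * fe51_value(g)) % P

The .Lfe51_reduce tail then squeezes the accumulated double-word limbs back to roughly 51 bits, feeding the carry out of h4 back into h0 times 19.
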
+{
+my ($a0,$a1,$a2,$a3,$a4,$t0,$t1) = ($a0,$a1,$a2,$a3,$a4,$t0,$t1);
+$code.=<<___;
+.globl x25519_fe51_sqr
+.type x25519_fe51_sqr,\@function
+.align 5
+x25519_fe51_sqr:
+ stdu $sp,-$FRAME($sp)
+ std r21,`$FRAME-8*11`($sp)
+ std r22,`$FRAME-8*10`($sp)
+ std r23,`$FRAME-8*9`($sp)
+ std r24,`$FRAME-8*8`($sp)
+ std r25,`$FRAME-8*7`($sp)
+ std r26,`$FRAME-8*6`($sp)
+ std r27,`$FRAME-8*5`($sp)
+ std r28,`$FRAME-8*4`($sp)
+ std r29,`$FRAME-8*3`($sp)
+ std r30,`$FRAME-8*2`($sp)
+ std r31,`$FRAME-8*1`($sp)
+
+ ld $a0,0($ap)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+ ld $a4,32($ap)
+
+ add $bi,$a0,$a0 # a[0]*2
+ mulli $t1,$a4,19 # a[4]*19
+
+ mulld $h0lo,$a0,$a0
+ mulhdu $h0hi,$a0,$a0
+ mulld $h1lo,$a1,$bi
+ mulhdu $h1hi,$a1,$bi
+ mulld $h2lo,$a2,$bi
+ mulhdu $h2hi,$a2,$bi
+ mulld $h3lo,$a3,$bi
+ mulhdu $h3hi,$a3,$bi
+ mulld $h4lo,$a4,$bi
+ mulhdu $h4hi,$a4,$bi
+ add $bi,$a1,$a1 # a[1]*2
+___
+ ($a4,$t1) = ($t1,$a4);
+$code.=<<___;
+ mulld $t0,$t1,$a4
+ mulhdu $t1,$t1,$a4
+ addc $h3lo,$h3lo,$t0
+ adde $h3hi,$h3hi,$t1
+
+ mulli $bp,$a3,19 # a[3]*19
+
+ mulld $t0,$a1,$a1
+ mulhdu $t1,$a1,$a1
+ addc $h2lo,$h2lo,$t0
+ adde $h2hi,$h2hi,$t1
+ mulld $t0,$a2,$bi
+ mulhdu $t1,$a2,$bi
+ addc $h3lo,$h3lo,$t0
+ adde $h3hi,$h3hi,$t1
+ mulld $t0,$a3,$bi
+ mulhdu $t1,$a3,$bi
+ addc $h4lo,$h4lo,$t0
+ adde $h4hi,$h4hi,$t1
+ mulld $t0,$a4,$bi
+ mulhdu $t1,$a4,$bi
+ add $bi,$a3,$a3 # a[3]*2
+ addc $h0lo,$h0lo,$t0
+ adde $h0hi,$h0hi,$t1
+___
+ ($a3,$t1) = ($bp,$a3);
+$code.=<<___;
+ mulld $t0,$t1,$a3
+ mulhdu $t1,$t1,$a3
+ addc $h1lo,$h1lo,$t0
+ adde $h1hi,$h1hi,$t1
+ mulld $t0,$bi,$a4
+ mulhdu $t1,$bi,$a4
+ add $bi,$a2,$a2 # a[2]*2
+ addc $h2lo,$h2lo,$t0
+ adde $h2hi,$h2hi,$t1
+
+ mulld $t0,$a2,$a2
+ mulhdu $t1,$a2,$a2
+ addc $h4lo,$h4lo,$t0
+ adde $h4hi,$h4hi,$t1
+ mulld $t0,$a3,$bi
+ mulhdu $t1,$a3,$bi
+ addc $h0lo,$h0lo,$t0
+ adde $h0hi,$h0hi,$t1
+ mulld $t0,$a4,$bi
+ mulhdu $t1,$a4,$bi
+ addc $h1lo,$h1lo,$t0
+ adde $h1hi,$h1hi,$t1
+
+ b .Lfe51_reduce
+ .long 0
+ .byte 0,12,4,0,0x80,11,2,0
+ .long 0
+.size x25519_fe51_sqr,.-x25519_fe51_sqr
+___
+}
+$code.=<<___;
+.globl x25519_fe51_mul121666
+.type x25519_fe51_mul121666,\@function
+.align 5
+x25519_fe51_mul121666:
+ stdu $sp,-$FRAME($sp)
+ std r21,`$FRAME-8*11`($sp)
+ std r22,`$FRAME-8*10`($sp)
+ std r23,`$FRAME-8*9`($sp)
+ std r24,`$FRAME-8*8`($sp)
+ std r25,`$FRAME-8*7`($sp)
+ std r26,`$FRAME-8*6`($sp)
+ std r27,`$FRAME-8*5`($sp)
+ std r28,`$FRAME-8*4`($sp)
+ std r29,`$FRAME-8*3`($sp)
+ std r30,`$FRAME-8*2`($sp)
+ std r31,`$FRAME-8*1`($sp)
+
+ lis $bi,`65536>>16`
+ ori $bi,$bi,`121666-65536`
+ ld $a0,0($ap)
+ ld $a1,8($ap)
+ ld $a2,16($ap)
+ ld $a3,24($ap)
+ ld $a4,32($ap)
+
+ mulld $h0lo,$a0,$bi # a[0]*121666
+ mulhdu $h0hi,$a0,$bi
+ mulld $h1lo,$a1,$bi # a[1]*121666
+ mulhdu $h1hi,$a1,$bi
+ mulld $h2lo,$a2,$bi # a[2]*121666
+ mulhdu $h2hi,$a2,$bi
+ mulld $h3lo,$a3,$bi # a[3]*121666
+ mulhdu $h3hi,$a3,$bi
+ mulld $h4lo,$a4,$bi # a[4]*121666
+ mulhdu $h4hi,$a4,$bi
+
+ b .Lfe51_reduce
+ .long 0
+ .byte 0,12,4,0,0x80,11,2,0
+ .long 0
+.size x25519_fe51_mul121666,.-x25519_fe51_mul121666
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;
diff --git a/deps/openssl/openssl/crypto/ec/asm/x25519-x86_64.pl b/deps/openssl/openssl/crypto/ec/asm/x25519-x86_64.pl
new file mode 100755
index 0000000000..18dc6af9fa
--- /dev/null
+++ b/deps/openssl/openssl/crypto/ec/asm/x25519-x86_64.pl
@@ -0,0 +1,1117 @@
+#!/usr/bin/env perl
+# Copyright 2018 The OpenSSL Project Authors. All Rights Reserved.
+#
+# Licensed under the OpenSSL license (the "License"). You may not use
+# this file except in compliance with the License. You can obtain a copy
+# in the file LICENSE in the source distribution or at
+# https://www.openssl.org/source/license.html
+#
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+# ====================================================================
+#
+# X25519 lower-level primitives for x86_64.
+#
+# February 2018.
+#
+# This module implements radix 2^51 multiplication and squaring, and
+# radix 2^64 multiplication, squaring, addition, subtraction and final
+# reduction. The latter radix is used on ADCX/ADOX-capable processors
+# such as Broadwell. On a related note, there are vector
+# implementations that provide significantly better performance on
+# some processors(*), but they are large and overly complex, which,
+# in combination with their being effectively processor-specific,
+# makes the undertaking hard to justify. The goal for this
+# implementation is rather versatility and simplicity [and ultimately
+# formal verification].
+#
+# (*) For example sandy2x should provide ~30% improvement on Sandy
+# Bridge, but only nominal ~5% on Haswell [and big loss on
+# Broadwell and successors].
+#
+######################################################################
+# Improvement coefficients:
+#
+# amd64-51(*) gcc-5.x(**)
+#
+# P4 +22% +40%
+# Sandy Bridge -3% +11%
+# Haswell -1% +13%
+# Broadwell(***) +30% +35%
+# Skylake(***) +33% +47%
+# Silvermont +20% +26%
+# Goldmont +40% +50%
+# Bulldozer +20% +9%
+# Ryzen(***) +43% +40%
+# VIA +170% +120%
+#
+# (*)	amd64-51 is a popular assembly implementation with 2^51 radix;
+#	only its multiplication and squaring subroutines were linked
+#	for comparison, not a complete ladder step; the gain on most
+#	processors is because this module refrains from shld, and the
+#	minor regression on others is because this does result in a
+#	higher instruction count;
+# (**)	the compiler is free to inline functions; in assembly one would
+#	need to implement the ladder step to do that, and it would
+#	improve performance by several percent;
+# (***)	ADCX/ADOX results are for the 2^64 radix; there is no
+#	corresponding C implementation, so that comparison is always
+#	against the 2^51 radix;
+
+$flavour = shift;
+$output = shift;
+if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
+
+$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
+
+$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
+( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
+( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
+die "can't locate x86_64-xlate.pl";
+
+open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
+*STDOUT=*OUT;
+
+if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
+ =~ /GNU assembler version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.23);
+}
+
+if (!$addx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) &&
+ `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
+ $addx = ($1>=2.10);
+}
+
+if (!$addx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
+ `ml64 2>&1` =~ /Version ([0-9]+)\./) {
+ $addx = ($1>=12);
+}
+
+if (!$addx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9])\.([0-9]+)/) {
+ my $ver = $2 + $3/100.0; # 3.1->3.01, 3.10->3.10
+ $addx = ($ver>=3.03);
+}
+
+$code.=<<___;
+.text
+
+.globl x25519_fe51_mul
+.type x25519_fe51_mul,\@function,3
+.align 32
+x25519_fe51_mul:
+.cfi_startproc
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+ lea -8*5(%rsp),%rsp
+.cfi_adjust_cfa_offset 40
+.Lfe51_mul_body:
+
+ mov 8*0(%rsi),%rax # f[0]
+ mov 8*0(%rdx),%r11 # load g[0-4]
+ mov 8*1(%rdx),%r12
+ mov 8*2(%rdx),%r13
+ mov 8*3(%rdx),%rbp
+ mov 8*4(%rdx),%r14
+
+ mov %rdi,8*4(%rsp) # offload 1st argument
+ mov %rax,%rdi
+ mulq %r11 # f[0]*g[0]
+ mov %r11,8*0(%rsp) # offload g[0]
+ mov %rax,%rbx # %rbx:%rcx = h0
+ mov %rdi,%rax
+ mov %rdx,%rcx
+ mulq %r12 # f[0]*g[1]
+ mov %r12,8*1(%rsp) # offload g[1]
+ mov %rax,%r8 # %r8:%r9 = h1
+ mov %rdi,%rax
+ lea (%r14,%r14,8),%r15
+ mov %rdx,%r9
+ mulq %r13 # f[0]*g[2]
+ mov %r13,8*2(%rsp) # offload g[2]
+ mov %rax,%r10 # %r10:%r11 = h2
+ mov %rdi,%rax
+ lea (%r14,%r15,2),%rdi # g[4]*19
+ mov %rdx,%r11
+ mulq %rbp # f[0]*g[3]
+ mov %rax,%r12 # %r12:%r13 = h3
+ mov 8*0(%rsi),%rax # f[0]
+ mov %rdx,%r13
+ mulq %r14 # f[0]*g[4]
+ mov %rax,%r14 # %r14:%r15 = h4
+ mov 8*1(%rsi),%rax # f[1]
+ mov %rdx,%r15
+
+ mulq %rdi # f[1]*g[4]*19
+ add %rax,%rbx
+ mov 8*2(%rsi),%rax # f[2]
+ adc %rdx,%rcx
+ mulq %rdi # f[2]*g[4]*19
+ add %rax,%r8
+ mov 8*3(%rsi),%rax # f[3]
+ adc %rdx,%r9
+ mulq %rdi # f[3]*g[4]*19
+ add %rax,%r10
+ mov 8*4(%rsi),%rax # f[4]
+ adc %rdx,%r11
+ mulq %rdi # f[4]*g[4]*19
+ imulq \$19,%rbp,%rdi # g[3]*19
+ add %rax,%r12
+ mov 8*1(%rsi),%rax # f[1]
+ adc %rdx,%r13
+ mulq %rbp # f[1]*g[3]
+ mov 8*2(%rsp),%rbp # g[2]
+ add %rax,%r14
+ mov 8*2(%rsi),%rax # f[2]
+ adc %rdx,%r15
+
+ mulq %rdi # f[2]*g[3]*19
+ add %rax,%rbx
+ mov 8*3(%rsi),%rax # f[3]
+ adc %rdx,%rcx
+ mulq %rdi # f[3]*g[3]*19
+ add %rax,%r8
+ mov 8*4(%rsi),%rax # f[4]
+ adc %rdx,%r9
+ mulq %rdi # f[4]*g[3]*19
+ imulq \$19,%rbp,%rdi # g[2]*19
+ add %rax,%r10
+ mov 8*1(%rsi),%rax # f[1]
+ adc %rdx,%r11
+ mulq %rbp # f[1]*g[2]
+ add %rax,%r12
+ mov 8*2(%rsi),%rax # f[2]
+ adc %rdx,%r13
+ mulq %rbp # f[2]*g[2]
+ mov 8*1(%rsp),%rbp # g[1]
+ add %rax,%r14
+ mov 8*3(%rsi),%rax # f[3]
+ adc %rdx,%r15
+
+ mulq %rdi # f[3]*g[2]*19
+ add %rax,%rbx
+	mov	8*4(%rsi),%rax		# f[4]
+ adc %rdx,%rcx
+ mulq %rdi # f[4]*g[2]*19
+ add %rax,%r8
+ mov 8*1(%rsi),%rax # f[1]
+ adc %rdx,%r9
+ mulq %rbp # f[1]*g[1]
+ imulq \$19,%rbp,%rdi
+ add %rax,%r10
+ mov 8*2(%rsi),%rax # f[2]
+ adc %rdx,%r11
+ mulq %rbp # f[2]*g[1]
+ add %rax,%r12
+ mov 8*3(%rsi),%rax # f[3]
+ adc %rdx,%r13
+ mulq %rbp # f[3]*g[1]
+ mov 8*0(%rsp),%rbp # g[0]
+ add %rax,%r14
+ mov 8*4(%rsi),%rax # f[4]
+ adc %rdx,%r15
+
+ mulq %rdi # f[4]*g[1]*19
+ add %rax,%rbx
+ mov 8*1(%rsi),%rax # f[1]
+ adc %rdx,%rcx
+	mulq	%rbp			# f[1]*g[0]
+ add %rax,%r8
+ mov 8*2(%rsi),%rax # f[2]
+ adc %rdx,%r9
+	mulq	%rbp			# f[2]*g[0]
+ add %rax,%r10
+ mov 8*3(%rsi),%rax # f[3]
+ adc %rdx,%r11
+	mulq	%rbp			# f[3]*g[0]
+ add %rax,%r12
+ mov 8*4(%rsi),%rax # f[4]
+ adc %rdx,%r13
+ mulq %rbp # f[4]*g[0]
+ add %rax,%r14
+ adc %rdx,%r15
+
+ mov 8*4(%rsp),%rdi # restore 1st argument
+ jmp .Lreduce51
+.Lfe51_mul_epilogue:
+.cfi_endproc
+.size x25519_fe51_mul,.-x25519_fe51_mul
+
+.globl x25519_fe51_sqr
+.type x25519_fe51_sqr,\@function,2
+.align 32
+x25519_fe51_sqr:
+.cfi_startproc
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+ lea -8*5(%rsp),%rsp
+.cfi_adjust_cfa_offset 40
+.Lfe51_sqr_body:
+
+ mov 8*0(%rsi),%rax # g[0]
+ mov 8*2(%rsi),%r15 # g[2]
+ mov 8*4(%rsi),%rbp # g[4]
+
+ mov %rdi,8*4(%rsp) # offload 1st argument
+ lea (%rax,%rax),%r14
+ mulq %rax # g[0]*g[0]
+ mov %rax,%rbx
+ mov 8*1(%rsi),%rax # g[1]
+ mov %rdx,%rcx
+ mulq %r14 # 2*g[0]*g[1]
+ mov %rax,%r8
+ mov %r15,%rax
+ mov %r15,8*0(%rsp) # offload g[2]
+ mov %rdx,%r9
+ mulq %r14 # 2*g[0]*g[2]
+ mov %rax,%r10
+ mov 8*3(%rsi),%rax
+ mov %rdx,%r11
+ imulq \$19,%rbp,%rdi # g[4]*19
+ mulq %r14 # 2*g[0]*g[3]
+ mov %rax,%r12
+ mov %rbp,%rax
+ mov %rdx,%r13
+ mulq %r14 # 2*g[0]*g[4]
+ mov %rax,%r14
+ mov %rbp,%rax
+ mov %rdx,%r15
+
+ mulq %rdi # g[4]*g[4]*19
+ add %rax,%r12
+ mov 8*1(%rsi),%rax # g[1]
+ adc %rdx,%r13
+
+ mov 8*3(%rsi),%rsi # g[3]
+ lea (%rax,%rax),%rbp
+ mulq %rax # g[1]*g[1]
+ add %rax,%r10
+ mov 8*0(%rsp),%rax # g[2]
+ adc %rdx,%r11
+ mulq %rbp # 2*g[1]*g[2]
+ add %rax,%r12
+ mov %rbp,%rax
+ adc %rdx,%r13
+ mulq %rsi # 2*g[1]*g[3]
+ add %rax,%r14
+ mov %rbp,%rax
+ adc %rdx,%r15
+ imulq \$19,%rsi,%rbp # g[3]*19
+ mulq %rdi # 2*g[1]*g[4]*19
+ add %rax,%rbx
+ lea (%rsi,%rsi),%rax
+ adc %rdx,%rcx
+
+ mulq %rdi # 2*g[3]*g[4]*19
+ add %rax,%r10
+ mov %rsi,%rax
+ adc %rdx,%r11
+ mulq %rbp # g[3]*g[3]*19
+ add %rax,%r8
+ mov 8*0(%rsp),%rax # g[2]
+ adc %rdx,%r9
+
+ lea (%rax,%rax),%rsi
+ mulq %rax # g[2]*g[2]
+ add %rax,%r14
+ mov %rbp,%rax
+ adc %rdx,%r15
+ mulq %rsi # 2*g[2]*g[3]*19
+ add %rax,%rbx
+ mov %rsi,%rax
+ adc %rdx,%rcx
+ mulq %rdi # 2*g[2]*g[4]*19
+ add %rax,%r8
+ adc %rdx,%r9
+
+ mov 8*4(%rsp),%rdi # restore 1st argument
+ jmp .Lreduce51
+
+.align 32
+.Lreduce51:
+ mov \$0x7ffffffffffff,%rbp
+
+ mov %r10,%rdx
+ shr \$51,%r10
+ shl \$13,%r11
+ and %rbp,%rdx # %rdx = g2 = h2 & mask
+ or %r10,%r11 # h2>>51
+ add %r11,%r12
+ adc \$0,%r13 # h3 += h2>>51
+
+ mov %rbx,%rax
+ shr \$51,%rbx
+ shl \$13,%rcx
+ and %rbp,%rax # %rax = g0 = h0 & mask
+ or %rbx,%rcx # h0>>51
+ add %rcx,%r8 # h1 += h0>>51
+ adc \$0,%r9
+
+ mov %r12,%rbx
+ shr \$51,%r12
+ shl \$13,%r13
+ and %rbp,%rbx # %rbx = g3 = h3 & mask
+ or %r12,%r13 # h3>>51
+ add %r13,%r14 # h4 += h3>>51
+ adc \$0,%r15
+
+ mov %r8,%rcx
+ shr \$51,%r8
+ shl \$13,%r9
+ and %rbp,%rcx # %rcx = g1 = h1 & mask
+ or %r8,%r9
+ add %r9,%rdx # g2 += h1>>51
+
+ mov %r14,%r10
+ shr \$51,%r14
+ shl \$13,%r15
+	and	%rbp,%r10		# %r10 = g4 = h4 & mask
+	or	%r14,%r15		# h4>>51
+
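+	# 19*(h4>>51) computed with two lea: %r14 = 9*x, then x + 2*(9*x) = 19*x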
+ lea (%r15,%r15,8),%r14
+ lea (%r15,%r14,2),%r15
+	add	%r15,%rax		# g0 += (h4>>51)*19
+
+ mov %rdx,%r8
+ and %rbp,%rdx # g2 &= mask
+ shr \$51,%r8
+ add %r8,%rbx # g3 += g2>>51
+
+ mov %rax,%r9
+ and %rbp,%rax # g0 &= mask
+ shr \$51,%r9
+ add %r9,%rcx # g1 += g0>>51
+
+ mov %rax,8*0(%rdi) # save the result
+ mov %rcx,8*1(%rdi)
+ mov %rdx,8*2(%rdi)
+ mov %rbx,8*3(%rdi)
+ mov %r10,8*4(%rdi)
+
+ mov 8*5(%rsp),%r15
+.cfi_restore %r15
+ mov 8*6(%rsp),%r14
+.cfi_restore %r14
+ mov 8*7(%rsp),%r13
+.cfi_restore %r13
+ mov 8*8(%rsp),%r12
+.cfi_restore %r12
+ mov 8*9(%rsp),%rbx
+.cfi_restore %rbx
+ mov 8*10(%rsp),%rbp
+.cfi_restore %rbp
+ lea 8*11(%rsp),%rsp
+.cfi_adjust_cfa_offset 88
+.Lfe51_sqr_epilogue:
+ ret
+.cfi_endproc
+.size x25519_fe51_sqr,.-x25519_fe51_sqr
+
+.globl x25519_fe51_mul121666
+.type x25519_fe51_mul121666,\@function,2
+.align 32
+x25519_fe51_mul121666:
+.cfi_startproc
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+ lea -8*5(%rsp),%rsp
+.cfi_adjust_cfa_offset 40
+.Lfe51_mul121666_body:
+ mov \$121666,%eax
+
+ mulq 8*0(%rsi)
+ mov %rax,%rbx # %rbx:%rcx = h0
+ mov \$121666,%eax
+ mov %rdx,%rcx
+ mulq 8*1(%rsi)
+ mov %rax,%r8 # %r8:%r9 = h1
+ mov \$121666,%eax
+ mov %rdx,%r9
+ mulq 8*2(%rsi)
+ mov %rax,%r10 # %r10:%r11 = h2
+ mov \$121666,%eax
+ mov %rdx,%r11
+ mulq 8*3(%rsi)
+ mov %rax,%r12 # %r12:%r13 = h3
+	mov	\$121666,%eax
+ mov %rdx,%r13
+ mulq 8*4(%rsi)
+ mov %rax,%r14 # %r14:%r15 = h4
+ mov %rdx,%r15
+
+ jmp .Lreduce51
+.Lfe51_mul121666_epilogue:
+.cfi_endproc
+.size x25519_fe51_mul121666,.-x25519_fe51_mul121666
+___
+########################################################################
+# Base 2^64 subroutines modulo 2*(2^255-19)
+#
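+# (results are only partially reduced, i.e. congruent modulo 2^255-19
+# but not necessarily canonical; this is fine because 2^256 = 38 mod
+# 2^255-19 and x25519_fe64_tobytes performs the final full reduction)
+#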
+if ($addx) {
+my ($acc0,$acc1,$acc2,$acc3,$acc4,$acc5,$acc6,$acc7) = map("%r$_",(8..15));
+
+$code.=<<___;
+.extern OPENSSL_ia32cap_P
+.globl x25519_fe64_eligible
+.type x25519_fe64_eligible,\@abi-omnipotent
+.align 32
+x25519_fe64_eligible:
+ mov OPENSSL_ia32cap_P+8(%rip),%ecx
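+	# third dword of OPENSSL_ia32cap_P is CPUID.7.EBX; 0x100 is BMI2, 0x80000 is ADX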
+ xor %eax,%eax
+ and \$0x80100,%ecx
+ cmp \$0x80100,%ecx
+ cmove %ecx,%eax
+ ret
+.size x25519_fe64_eligible,.-x25519_fe64_eligible
+
+.globl x25519_fe64_mul
+.type x25519_fe64_mul,\@function,3
+.align 32
+x25519_fe64_mul:
+.cfi_startproc
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+ push %rdi # offload dst
+.cfi_push %rdi
+ lea -8*2(%rsp),%rsp
+.cfi_adjust_cfa_offset 16
+.Lfe64_mul_body:
+
+ mov %rdx,%rax
+ mov 8*0(%rdx),%rbp # b[0]
+ mov 8*0(%rsi),%rdx # a[0]
+ mov 8*1(%rax),%rcx # b[1]
+ mov 8*2(%rax),$acc6 # b[2]
+ mov 8*3(%rax),$acc7 # b[3]
+
+ mulx %rbp,$acc0,%rax # a[0]*b[0]
+ xor %edi,%edi # cf=0,of=0
+ mulx %rcx,$acc1,%rbx # a[0]*b[1]
+ adcx %rax,$acc1
+ mulx $acc6,$acc2,%rax # a[0]*b[2]
+ adcx %rbx,$acc2
+ mulx $acc7,$acc3,$acc4 # a[0]*b[3]
+ mov 8*1(%rsi),%rdx # a[1]
+ adcx %rax,$acc3
+ mov $acc6,(%rsp) # offload b[2]
+ adcx %rdi,$acc4 # cf=0
+
+ mulx %rbp,%rax,%rbx # a[1]*b[0]
+ adox %rax,$acc1
+ adcx %rbx,$acc2
+ mulx %rcx,%rax,%rbx # a[1]*b[1]
+ adox %rax,$acc2
+ adcx %rbx,$acc3
+ mulx $acc6,%rax,%rbx # a[1]*b[2]
+ adox %rax,$acc3
+ adcx %rbx,$acc4
+ mulx $acc7,%rax,$acc5 # a[1]*b[3]
+ mov 8*2(%rsi),%rdx # a[2]
+ adox %rax,$acc4
+ adcx %rdi,$acc5 # cf=0
+ adox %rdi,$acc5 # of=0
+
+ mulx %rbp,%rax,%rbx # a[2]*b[0]
+ adcx %rax,$acc2
+ adox %rbx,$acc3
+ mulx %rcx,%rax,%rbx # a[2]*b[1]
+ adcx %rax,$acc3
+ adox %rbx,$acc4
+ mulx $acc6,%rax,%rbx # a[2]*b[2]
+ adcx %rax,$acc4
+ adox %rbx,$acc5
+ mulx $acc7,%rax,$acc6 # a[2]*b[3]
+ mov 8*3(%rsi),%rdx # a[3]
+ adcx %rax,$acc5
+ adox %rdi,$acc6 # of=0
+ adcx %rdi,$acc6 # cf=0
+
+ mulx %rbp,%rax,%rbx # a[3]*b[0]
+ adox %rax,$acc3
+ adcx %rbx,$acc4
+ mulx %rcx,%rax,%rbx # a[3]*b[1]
+ adox %rax,$acc4
+ adcx %rbx,$acc5
+ mulx (%rsp),%rax,%rbx # a[3]*b[2]
+ adox %rax,$acc5
+ adcx %rbx,$acc6
+ mulx $acc7,%rax,$acc7 # a[3]*b[3]
+ mov \$38,%edx
+ adox %rax,$acc6
+ adcx %rdi,$acc7 # cf=0
+ adox %rdi,$acc7 # of=0
+
+ jmp .Lreduce64
+.Lfe64_mul_epilogue:
+.cfi_endproc
+.size x25519_fe64_mul,.-x25519_fe64_mul
+
+.globl x25519_fe64_sqr
+.type x25519_fe64_sqr,\@function,2
+.align 32
+x25519_fe64_sqr:
+.cfi_startproc
+ push %rbp
+.cfi_push %rbp
+ push %rbx
+.cfi_push %rbx
+ push %r12
+.cfi_push %r12
+ push %r13
+.cfi_push %r13
+ push %r14
+.cfi_push %r14
+ push %r15
+.cfi_push %r15
+ push %rdi # offload dst
+.cfi_push %rdi
+ lea -8*2(%rsp),%rsp
+.cfi_adjust_cfa_offset 16
+.Lfe64_sqr_body:
+
+ mov 8*0(%rsi),%rdx # a[0]
+ mov 8*1(%rsi),%rcx # a[1]
+ mov 8*2(%rsi),%rbp # a[2]
+ mov 8*3(%rsi),%rsi # a[3]
+
+ ################################################################
+ mulx %rdx,$acc0,$acc7 # a[0]*a[0]
+ mulx %rcx,$acc1,%rax # a[0]*a[1]
+ xor %edi,%edi # cf=0,of=0
+ mulx %rbp,$acc2,%rbx # a[0]*a[2]
+ adcx %rax,$acc2
+ mulx %rsi,$acc3,$acc4 # a[0]*a[3]
+ mov %rcx,%rdx # a[1]
+ adcx %rbx,$acc3
+ adcx %rdi,$acc4 # cf=0
+
+ ################################################################
+ mulx %rbp,%rax,%rbx # a[1]*a[2]
+ adox %rax,$acc3
+ adcx %rbx,$acc4
+ mulx %rsi,%rax,$acc5 # a[1]*a[3]
+ mov %rbp,%rdx # a[2]
+ adox %rax,$acc4
+ adcx %rdi,$acc5
+
+ ################################################################
+ mulx %rsi,%rax,$acc6 # a[2]*a[3]
+ mov %rcx,%rdx # a[1]
+ adox %rax,$acc5
+ adcx %rdi,$acc6 # cf=0
+ adox %rdi,$acc6 # of=0
+
+ adcx $acc1,$acc1 # acc1:6<<1
+ adox $acc7,$acc1
+ adcx $acc2,$acc2
+ mulx %rdx,%rax,%rbx # a[1]*a[1]
+ mov %rbp,%rdx # a[2]
+ adcx $acc3,$acc3
+ adox %rax,$acc2
+ adcx $acc4,$acc4
+ adox %rbx,$acc3
+ mulx %rdx,%rax,%rbx # a[2]*a[2]
+ mov %rsi,%rdx # a[3]
+ adcx $acc5,$acc5
+ adox %rax,$acc4
+ adcx $acc6,$acc6
+ adox %rbx,$acc5
+ mulx %rdx,%rax,$acc7 # a[3]*a[3]
+ mov \$38,%edx
+ adox %rax,$acc6
+ adcx %rdi,$acc7 # cf=0
+ adox %rdi,$acc7 # of=0
+ jmp .Lreduce64
+
+.align 32
+.Lreduce64:
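+	# fold acc4..acc7 (bits 256..511) back into acc0..acc3;
+	# %rdx holds 38 because 2^256 = 38 mod 2^255-19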
+ mulx $acc4,%rax,%rbx
+ adcx %rax,$acc0
+ adox %rbx,$acc1
+ mulx $acc5,%rax,%rbx
+ adcx %rax,$acc1
+ adox %rbx,$acc2
+ mulx $acc6,%rax,%rbx
+ adcx %rax,$acc2
+ adox %rbx,$acc3
+ mulx $acc7,%rax,$acc4
+ adcx %rax,$acc3
+ adox %rdi,$acc4
+ adcx %rdi,$acc4
+
+ mov 8*2(%rsp),%rdi # restore dst
+ imulq %rdx,$acc4
+
+ add $acc4,$acc0
+ adc \$0,$acc1
+ adc \$0,$acc2
+ adc \$0,$acc3
+
+ sbb %rax,%rax # cf -> mask
+ and \$38,%rax
+
+ add %rax,$acc0
+ mov $acc1,8*1(%rdi)
+ mov $acc2,8*2(%rdi)
+ mov $acc3,8*3(%rdi)
+ mov $acc0,8*0(%rdi)
+
+ mov 8*3(%rsp),%r15
+.cfi_restore %r15
+ mov 8*4(%rsp),%r14
+.cfi_restore %r14
+ mov 8*5(%rsp),%r13
+.cfi_restore %r13
+ mov 8*6(%rsp),%r12
+.cfi_restore %r12
+ mov 8*7(%rsp),%rbx
+.cfi_restore %rbx
+ mov 8*8(%rsp),%rbp
+.cfi_restore %rbp
+ lea 8*9(%rsp),%rsp
+.cfi_adjust_cfa_offset 88
+.Lfe64_sqr_epilogue:
+ ret
+.cfi_endproc
+.size x25519_fe64_sqr,.-x25519_fe64_sqr
+
+.globl x25519_fe64_mul121666
+.type x25519_fe64_mul121666,\@function,2
+.align 32
+x25519_fe64_mul121666:
+.Lfe64_mul121666_body:
+ mov \$121666,%edx
+ mulx 8*0(%rsi),$acc0,%rcx
+ mulx 8*1(%rsi),$acc1,%rax
+ add %rcx,$acc1
+ mulx 8*2(%rsi),$acc2,%rcx
+ adc %rax,$acc2
+ mulx 8*3(%rsi),$acc3,%rax
+ adc %rcx,$acc3
+ adc \$0,%rax
+
+ imulq \$38,%rax,%rax
+
+ add %rax,$acc0
+ adc \$0,$acc1
+ adc \$0,$acc2
+ adc \$0,$acc3
+
+ sbb %rax,%rax # cf -> mask
+ and \$38,%rax
+
+ add %rax,$acc0
+ mov $acc1,8*1(%rdi)
+ mov $acc2,8*2(%rdi)
+ mov $acc3,8*3(%rdi)
+ mov $acc0,8*0(%rdi)
+
+.Lfe64_mul121666_epilogue:
+ ret
+.size x25519_fe64_mul121666,.-x25519_fe64_mul121666
+
+.globl x25519_fe64_add
+.type x25519_fe64_add,\@function,3
+.align 32
+x25519_fe64_add:
+.Lfe64_add_body:
+ mov 8*0(%rsi),$acc0
+ mov 8*1(%rsi),$acc1
+ mov 8*2(%rsi),$acc2
+ mov 8*3(%rsi),$acc3
+
+ add 8*0(%rdx),$acc0
+ adc 8*1(%rdx),$acc1
+ adc 8*2(%rdx),$acc2
+ adc 8*3(%rdx),$acc3
+
+ sbb %rax,%rax # cf -> mask
+ and \$38,%rax
+
+ add %rax,$acc0
+ adc \$0,$acc1
+ adc \$0,$acc2
+ mov $acc1,8*1(%rdi)
+ adc \$0,$acc3
+ mov $acc2,8*2(%rdi)
+ sbb %rax,%rax # cf -> mask
+ mov $acc3,8*3(%rdi)
+ and \$38,%rax
+
+ add %rax,$acc0
+ mov $acc0,8*0(%rdi)
+
+.Lfe64_add_epilogue:
+ ret
+.size x25519_fe64_add,.-x25519_fe64_add
+
+.globl x25519_fe64_sub
+.type x25519_fe64_sub,\@function,3
+.align 32
+x25519_fe64_sub:
+.Lfe64_sub_body:
+ mov 8*0(%rsi),$acc0
+ mov 8*1(%rsi),$acc1
+ mov 8*2(%rsi),$acc2
+ mov 8*3(%rsi),$acc3
+
+ sub 8*0(%rdx),$acc0
+ sbb 8*1(%rdx),$acc1
+ sbb 8*2(%rdx),$acc2
+ sbb 8*3(%rdx),$acc3
+
+ sbb %rax,%rax # cf -> mask
+ and \$38,%rax
+
+ sub %rax,$acc0
+ sbb \$0,$acc1
+ sbb \$0,$acc2
+ mov $acc1,8*1(%rdi)
+ sbb \$0,$acc3
+ mov $acc2,8*2(%rdi)
+ sbb %rax,%rax # cf -> mask
+ mov $acc3,8*3(%rdi)
+ and \$38,%rax
+
+ sub %rax,$acc0
+ mov $acc0,8*0(%rdi)
+
+.Lfe64_sub_epilogue:
+ ret
+.size x25519_fe64_sub,.-x25519_fe64_sub
+
+.globl x25519_fe64_tobytes
+.type x25519_fe64_tobytes,\@function,2
+.align 32
+x25519_fe64_tobytes:
+.Lfe64_to_body:
+ mov 8*0(%rsi),$acc0
+ mov 8*1(%rsi),$acc1
+ mov 8*2(%rsi),$acc2
+ mov 8*3(%rsi),$acc3
+
+ ################################# reduction modulo 2^255-19
+ lea ($acc3,$acc3),%rax
+ sar \$63,$acc3 # most significant bit -> mask
+ shr \$1,%rax # most significant bit cleared
+ and \$19,$acc3
+ add \$19,$acc3 # compare to modulus in the same go
+
+ add $acc3,$acc0
+ adc \$0,$acc1
+ adc \$0,$acc2
+ adc \$0,%rax
+
+ lea (%rax,%rax),$acc3
+ sar \$63,%rax # most significant bit -> mask
+ shr \$1,$acc3 # most significant bit cleared
+ not %rax
+ and \$19,%rax
+
+ sub %rax,$acc0
+ sbb \$0,$acc1
+ sbb \$0,$acc2
+ sbb \$0,$acc3
+
+ mov $acc0,8*0(%rdi)
+ mov $acc1,8*1(%rdi)
+ mov $acc2,8*2(%rdi)
+ mov $acc3,8*3(%rdi)
+
+.Lfe64_to_epilogue:
+ ret
+.size x25519_fe64_tobytes,.-x25519_fe64_tobytes
+___
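
x25519_fe64_tobytes above produces the canonical 32-byte encoding without an explicit comparison against the modulus: it clears bit 255, adds 19 plus another 19 when that bit was set, and then subtracts 19 back unless the addition carried into bit 255. A rough Python model of the trick (the function name is mine; the input may be any 256-bit value):

    # A model only; plain Python integers stand in for the 4x64-bit limbs.
    P = 2**255 - 19

    def fe64_tobytes_model(v):
        b = v >> 255                            # bit 255 of the input
        t = (v & (2**255 - 1)) + 19 * b + 19    # "compare to modulus in the same go"
        wrapped = t >> 255                      # 1 iff the reduced value was >= P
        r = (t & (2**255 - 1)) - 19 * (1 - wrapped)
        assert r == v % P
        return r.to_bytes(32, "little")

    fe64_tobytes_model(2**256 - 1)              # exercises the wrap-around case
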
+} else {
+$code.=<<___;
+.globl x25519_fe64_eligible
+.type x25519_fe64_eligible,\@abi-omnipotent
+.align 32
+x25519_fe64_eligible:
+ xor %eax,%eax
+ ret
+.size x25519_fe64_eligible,.-x25519_fe64_eligible
+
+.globl x25519_fe64_mul
+.type x25519_fe64_mul,\@abi-omnipotent
+.globl x25519_fe64_sqr
+.globl x25519_fe64_mul121666
+.globl x25519_fe64_add
+.globl x25519_fe64_sub
+.globl x25519_fe64_tobytes
+x25519_fe64_mul:
+x25519_fe64_sqr:
+x25519_fe64_mul121666:
+x25519_fe64_add:
+x25519_fe64_sub:
+x25519_fe64_tobytes:
+ .byte 0x0f,0x0b # ud2
+ ret
+.size x25519_fe64_mul,.-x25519_fe64_mul
+___
+}
+$code.=<<___;
+.asciz "X25519 primitives for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
+___
+
+# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
+# CONTEXT *context,DISPATCHER_CONTEXT *disp)
+if ($win64) {
+$rec="%rcx";
+$frame="%rdx";
+$context="%r8";
+$disp="%r9";
+
+$code.=<<___;
+.extern __imp_RtlVirtualUnwind
+
+.type short_handler,\@abi-omnipotent
+.align 16
+short_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+ jmp .Lcommon_seh_tail
+.size short_handler,.-short_handler
+
+.type full_handler,\@abi-omnipotent
+.align 16
+full_handler:
+ push %rsi
+ push %rdi
+ push %rbx
+ push %rbp
+ push %r12
+ push %r13
+ push %r14
+ push %r15
+ pushfq
+ sub \$64,%rsp
+
+ mov 120($context),%rax # pull context->Rax
+ mov 248($context),%rbx # pull context->Rip
+
+ mov 8($disp),%rsi # disp->ImageBase
+ mov 56($disp),%r11 # disp->HandlerData
+
+ mov 0(%r11),%r10d # HandlerData[0]
+ lea (%rsi,%r10),%r10 # end of prologue label
+ cmp %r10,%rbx # context->Rip<end of prologue label
+ jb .Lcommon_seh_tail
+
+ mov 152($context),%rax # pull context->Rsp
+
+ mov 4(%r11),%r10d # HandlerData[1]
+ lea (%rsi,%r10),%r10 # epilogue label
+ cmp %r10,%rbx # context->Rip>=epilogue label
+ jae .Lcommon_seh_tail
+
+ mov 8(%r11),%r10d # HandlerData[2]
+ lea (%rax,%r10),%rax
+
+ mov -8(%rax),%rbp
+ mov -16(%rax),%rbx
+ mov -24(%rax),%r12
+ mov -32(%rax),%r13
+ mov -40(%rax),%r14
+ mov -48(%rax),%r15
+ mov %rbx,144($context) # restore context->Rbx
+ mov %rbp,160($context) # restore context->Rbp
+ mov %r12,216($context) # restore context->R12
+ mov %r13,224($context) # restore context->R13
+ mov %r14,232($context) # restore context->R14
+ mov %r15,240($context) # restore context->R15
+
+.Lcommon_seh_tail:
+ mov 8(%rax),%rdi
+ mov 16(%rax),%rsi
+ mov %rax,152($context) # restore context->Rsp
+ mov %rsi,168($context) # restore context->Rsi
+ mov %rdi,176($context) # restore context->Rdi
+
+ mov 40($disp),%rdi # disp->ContextRecord
+ mov $context,%rsi # context
+ mov \$154,%ecx # sizeof(CONTEXT)
+ .long 0xa548f3fc # cld; rep movsq
+
+ mov $disp,%rsi
+ xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER
+ mov 8(%rsi),%rdx # arg2, disp->ImageBase
+ mov 0(%rsi),%r8 # arg3, disp->ControlPc
+ mov 16(%rsi),%r9 # arg4, disp->FunctionEntry
+ mov 40(%rsi),%r10 # disp->ContextRecord
+ lea 56(%rsi),%r11 # &disp->HandlerData
+ lea 24(%rsi),%r12 # &disp->EstablisherFrame
+ mov %r10,32(%rsp) # arg5
+ mov %r11,40(%rsp) # arg6
+ mov %r12,48(%rsp) # arg7
+ mov %rcx,56(%rsp) # arg8, (NULL)
+ call *__imp_RtlVirtualUnwind(%rip)
+
+ mov \$1,%eax # ExceptionContinueSearch
+ add \$64,%rsp
+ popfq
+ pop %r15
+ pop %r14
+ pop %r13
+ pop %r12
+ pop %rbp
+ pop %rbx
+ pop %rdi
+ pop %rsi
+ ret
+.size full_handler,.-full_handler
+
+.section .pdata
+.align 4
+ .rva .LSEH_begin_x25519_fe51_mul
+ .rva .LSEH_end_x25519_fe51_mul
+ .rva .LSEH_info_x25519_fe51_mul
+
+ .rva .LSEH_begin_x25519_fe51_sqr
+ .rva .LSEH_end_x25519_fe51_sqr
+ .rva .LSEH_info_x25519_fe51_sqr
+
+ .rva .LSEH_begin_x25519_fe51_mul121666
+ .rva .LSEH_end_x25519_fe51_mul121666
+ .rva .LSEH_info_x25519_fe51_mul121666
+___
+$code.=<<___ if ($addx);
+ .rva .LSEH_begin_x25519_fe64_mul
+ .rva .LSEH_end_x25519_fe64_mul
+ .rva .LSEH_info_x25519_fe64_mul
+
+ .rva .LSEH_begin_x25519_fe64_sqr
+ .rva .LSEH_end_x25519_fe64_sqr
+ .rva .LSEH_info_x25519_fe64_sqr
+
+ .rva .LSEH_begin_x25519_fe64_mul121666
+ .rva .LSEH_end_x25519_fe64_mul121666
+ .rva .LSEH_info_x25519_fe64_mul121666
+
+ .rva .LSEH_begin_x25519_fe64_add
+ .rva .LSEH_end_x25519_fe64_add
+ .rva .LSEH_info_x25519_fe64_add
+
+ .rva .LSEH_begin_x25519_fe64_sub
+ .rva .LSEH_end_x25519_fe64_sub
+ .rva .LSEH_info_x25519_fe64_sub
+
+ .rva .LSEH_begin_x25519_fe64_tobytes
+ .rva .LSEH_end_x25519_fe64_tobytes
+ .rva .LSEH_info_x25519_fe64_tobytes
+___
+$code.=<<___;
+.section .xdata
+.align 8
+.LSEH_info_x25519_fe51_mul:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lfe51_mul_body,.Lfe51_mul_epilogue # HandlerData[]
+ .long 88,0
+.LSEH_info_x25519_fe51_sqr:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lfe51_sqr_body,.Lfe51_sqr_epilogue # HandlerData[]
+ .long 88,0
+.LSEH_info_x25519_fe51_mul121666:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lfe51_mul121666_body,.Lfe51_mul121666_epilogue # HandlerData[]
+ .long 88,0
+___
+$code.=<<___ if ($addx);
+.LSEH_info_x25519_fe64_mul:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lfe64_mul_body,.Lfe64_mul_epilogue # HandlerData[]
+ .long 72,0
+.LSEH_info_x25519_fe64_sqr:
+ .byte 9,0,0,0
+ .rva full_handler
+ .rva .Lfe64_sqr_body,.Lfe64_sqr_epilogue # HandlerData[]
+ .long 72,0
+.LSEH_info_x25519_fe64_mul121666:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lfe64_mul121666_body,.Lfe64_mul121666_epilogue # HandlerData[]
+.LSEH_info_x25519_fe64_add:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lfe64_add_body,.Lfe64_add_epilogue # HandlerData[]
+.LSEH_info_x25519_fe64_sub:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lfe64_sub_body,.Lfe64_sub_epilogue # HandlerData[]
+.LSEH_info_x25519_fe64_tobytes:
+ .byte 9,0,0,0
+ .rva short_handler
+ .rva .Lfe64_to_body,.Lfe64_to_epilogue # HandlerData[]
+___
+}
+
+$code =~ s/\`([^\`]*)\`/eval $1/gem;
+print $code;
+close STDOUT;