Diffstat (limited to 'drivers/builtin_openssl2/crypto/bn/asm')
 -rw-r--r--  drivers/builtin_openssl2/crypto/bn/asm/armv4-gf2m.pl   |   24
 -rw-r--r--  drivers/builtin_openssl2/crypto/bn/asm/ia64.S          |    4
 -rw-r--r--  drivers/builtin_openssl2/crypto/bn/asm/mips.pl         |  611
 -rw-r--r--  drivers/builtin_openssl2/crypto/bn/asm/s390x-gf2m.pl   |    6
 -rw-r--r--  drivers/builtin_openssl2/crypto/bn/asm/x86-gf2m.pl     |   16
 -rw-r--r--  drivers/builtin_openssl2/crypto/bn/asm/x86_64-gcc.c    | 1102
 -rw-r--r--  drivers/builtin_openssl2/crypto/bn/asm/x86_64-gf2m.pl  |   16
 -rwxr-xr-x  drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont5.pl |  513
 8 files changed, 1044 insertions(+), 1248 deletions(-)
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/armv4-gf2m.pl b/drivers/builtin_openssl2/crypto/bn/asm/armv4-gf2m.pl
index c52e0b75b5..22ad1f85f9 100644
--- a/drivers/builtin_openssl2/crypto/bn/asm/armv4-gf2m.pl
+++ b/drivers/builtin_openssl2/crypto/bn/asm/armv4-gf2m.pl
@@ -41,13 +41,13 @@ $code=<<___;
.align 5
mul_1x1_neon:
vshl.u64 `&Dlo("q1")`,d16,#8 @ q1-q3 are slided $a
- vmull.p8 `&Q("d0")`,d16,d17 @ a·bb
+ vmull.p8 `&Q("d0")`,d16,d17 @ a·bb
vshl.u64 `&Dlo("q2")`,d16,#16
- vmull.p8 q1,`&Dlo("q1")`,d17 @ a<<8·bb
+ vmull.p8 q1,`&Dlo("q1")`,d17 @ a<<8·bb
vshl.u64 `&Dlo("q3")`,d16,#24
- vmull.p8 q2,`&Dlo("q2")`,d17 @ a<<16·bb
+ vmull.p8 q2,`&Dlo("q2")`,d17 @ a<<16·bb
vshr.u64 `&Dlo("q1")`,#8
- vmull.p8 q3,`&Dlo("q3")`,d17 @ a<<24·bb
+ vmull.p8 q3,`&Dlo("q3")`,d17 @ a<<24·bb
vshl.u64 `&Dhi("q1")`,#24
veor d0,`&Dlo("q1")`
vshr.u64 `&Dlo("q2")`,#16
@@ -158,7 +158,7 @@ ___
################
# void bn_GF2m_mul_2x2(BN_ULONG *r,
# BN_ULONG a1,BN_ULONG a0,
-# BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
+# BN_ULONG b1,BN_ULONG b0); # r[3..0]=a1a0·b1b0
($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23));
@@ -184,20 +184,20 @@ bn_GF2m_mul_2x2:
vmov d16,$A1
vmov d17,$B1
- bl mul_1x1_neon @ a1·b1
+ bl mul_1x1_neon @ a1·b1
vmov $A1B1,d0
vmov d16,$A0
vmov d17,$B0
- bl mul_1x1_neon @ a0·b0
+ bl mul_1x1_neon @ a0·b0
vmov $A0B0,d0
veor d16,$A0,$A1
veor d17,$B0,$B1
veor $A0,$A0B0,$A1B1
- bl mul_1x1_neon @ (a0+a1)·(b0+b1)
+ bl mul_1x1_neon @ (a0+a1)·(b0+b1)
- veor d0,$A0 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ veor d0,$A0 @ (a0+a1)·(b0+b1)-a0·b0-a1·b1
vshl.u64 d1,d0,#32
vshr.u64 d0,d0,#32
veor $A0B0,d1
@@ -220,7 +220,7 @@ $code.=<<___;
mov $mask,#7<<2
sub sp,sp,#32 @ allocate tab[8]
- bl mul_1x1_ialu @ a1·b1
+ bl mul_1x1_ialu @ a1·b1
str $lo,[$ret,#8]
str $hi,[$ret,#12]
@@ -230,13 +230,13 @@ $code.=<<___;
eor r2,r2,$a
eor $b,$b,r3
eor $a,$a,r2
- bl mul_1x1_ialu @ a0·b0
+ bl mul_1x1_ialu @ a0·b0
str $lo,[$ret]
str $hi,[$ret,#4]
eor $a,$a,r2
eor $b,$b,r3
- bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
+ bl mul_1x1_ialu @ (a1+a0)·(b1+b0)
___
@r=map("r$_",(6..9));
$code.=<<___;
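
Note: the three mul_1x1 calls in this file are one level of Karatsuba over
GF(2), where addition is XOR, so the middle result words come out as
(a0+a1)·(b0+b1) ⊕ a0·b0 ⊕ a1·b1 — exactly what the veor sequence after the
third call computes. A minimal C sketch of that structure (illustrative
names, a bit-serial multiply standing in for mul_1x1_neon/ialu, 32-bit
BN_ULONG as on ARM; not code from this tree):

#include <stdint.h>

/* Naive carry-less (GF(2)[x]) 32x32->64 multiply. */
static uint64_t clmul32(uint32_t a, uint32_t b)
{
    uint64_t r = 0;
    for (int i = 0; i < 32; i++)
        if ((b >> i) & 1)
            r ^= (uint64_t)a << i;      /* shift-and-XOR schoolbook */
    return r;
}

/* r[3..0] = a1a0 * b1b0 over GF(2): three 1x1 products instead of four. */
static void gf2m_mul_2x2(uint32_t r[4], uint32_t a1, uint32_t a0,
                         uint32_t b1, uint32_t b0)
{
    uint64_t hh = clmul32(a1, b1);            /* a1*b1           */
    uint64_t ll = clmul32(a0, b0);            /* a0*b0           */
    uint64_t mm = clmul32(a0 ^ a1, b0 ^ b1);  /* (a0+a1)*(b0+b1) */

    mm ^= hh ^ ll;                      /* middle term; "+" is XOR */
    r[0] = (uint32_t)ll;
    r[1] = (uint32_t)(ll >> 32) ^ (uint32_t)mm;
    r[2] = (uint32_t)hh ^ (uint32_t)(mm >> 32);
    r[3] = (uint32_t)(hh >> 32);
}
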
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/ia64.S b/drivers/builtin_openssl2/crypto/bn/asm/ia64.S
index 951abc53ea..a9a42abfc3 100644
--- a/drivers/builtin_openssl2/crypto/bn/asm/ia64.S
+++ b/drivers/builtin_openssl2/crypto/bn/asm/ia64.S
@@ -422,7 +422,7 @@ bn_mul_add_words:
// This loop spins in 3*(n+10) ticks on Itanium and in 2*(n+10) on
// Itanium 2. Yes, unlike previous versions it scales:-) Previous
-// version was peforming *all* additions in IALU and was starving
+// version was performing *all* additions in IALU and was starving
// for those even on Itanium 2. In this version one addition is
// moved to FPU and is folded with multiplication. This is at cost
// of propogating the result from previous call to this subroutine
@@ -568,7 +568,7 @@ bn_sqr_comba8:
// I've estimated this routine to run in ~120 ticks, but in reality
// (i.e. according to ar.itc) it takes ~160 ticks. Are those extra
// cycles consumed for instructions fetch? Or did I misinterpret some
-// clause in Itanium µ-architecture manual? Comments are welcomed and
+// clause in Itanium µ-architecture manual? Comments are welcomed and
// highly appreciated.
//
// On Itanium 2 it takes ~190 ticks. This is because of stalls on
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/mips.pl b/drivers/builtin_openssl2/crypto/bn/asm/mips.pl
index d2f3ef7bbf..215c9a7483 100644
--- a/drivers/builtin_openssl2/crypto/bn/asm/mips.pl
+++ b/drivers/builtin_openssl2/crypto/bn/asm/mips.pl
@@ -1872,6 +1872,41 @@ ___
($a_4,$a_5,$a_6,$a_7)=($b_0,$b_1,$b_2,$b_3);
+sub add_c2 () {
+my ($hi,$lo,$c0,$c1,$c2,
+ $warm, # !$warm denotes first call with specific sequence of
+ # $c_[XYZ] when there is no Z-carry to accumulate yet;
+ $an,$bn # these two are arguments for multiplication which
+ # result is used in *next* step [which is why it's
+ # commented as "forward multiplication" below];
+ )=@_;
+$code.=<<___;
+ mflo $lo
+ mfhi $hi
+ $ADDU $c0,$lo
+ sltu $at,$c0,$lo
+ $MULTU $an,$bn # forward multiplication
+ $ADDU $c0,$lo
+ $ADDU $at,$hi
+ sltu $lo,$c0,$lo
+ $ADDU $c1,$at
+ $ADDU $hi,$lo
+___
+$code.=<<___ if (!$warm);
+ sltu $c2,$c1,$at
+ $ADDU $c1,$hi
+ sltu $hi,$c1,$hi
+ $ADDU $c2,$hi
+___
+$code.=<<___ if ($warm);
+ sltu $at,$c1,$at
+ $ADDU $c1,$hi
+ $ADDU $c2,$at
+ sltu $hi,$c1,$hi
+ $ADDU $c2,$hi
+___
+}
+
$code.=<<___;
.align 5
@@ -1920,21 +1955,10 @@ $code.=<<___;
sltu $at,$c_2,$t_1
$ADDU $c_3,$t_2,$at
$ST $c_2,$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_2,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_1,$a_1); # mul_add_c(a[1],b[1],c3,c1,c2);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_3,$t_1
@@ -1945,67 +1969,19 @@ $code.=<<___;
sltu $at,$c_1,$t_2
$ADDU $c_2,$at
$ST $c_3,2*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_3,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_1,$a_2 # mul_add_c2(a[1],b[2],c1,c2,c3);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_3,$at
- $MULTU $a_4,$a_0 # mul_add_c2(a[4],b[0],c2,c3,c1);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_1,$a_2); # mul_add_c2(a[1],b[2],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_4,$a_0); # mul_add_c2(a[4],b[0],c2,c3,c1);
+$code.=<<___;
$ST $c_1,3*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_1,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_1,$at
- $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_3,$a_1); # mul_add_c2(a[3],b[1],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_2,$a_2); # mul_add_c(a[2],b[2],c2,c3,c1);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_2,$t_1
@@ -2016,97 +1992,23 @@ $code.=<<___;
sltu $at,$c_3,$t_2
$ADDU $c_1,$at
$ST $c_2,4*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_2,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_1,$a_4 # mul_add_c2(a[1],b[4],c3,c1,c2);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_2,$at
- $MULTU $a_2,$a_3 # mul_add_c2(a[2],b[3],c3,c1,c2);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $MULTU $a_6,$a_0 # mul_add_c2(a[6],b[0],c1,c2,c3);
- $ADDU $c_2,$at
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_1,$a_4); # mul_add_c2(a[1],b[4],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_2,$a_3); # mul_add_c2(a[2],b[3],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_6,$a_0); # mul_add_c2(a[6],b[0],c1,c2,c3);
+$code.=<<___;
$ST $c_3,5*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_3,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_5,$a_1 # mul_add_c2(a[5],b[1],c1,c2,c3);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_3,$at
- $MULTU $a_4,$a_2 # mul_add_c2(a[4],b[2],c1,c2,c3);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_3,$at
- $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_5,$a_1); # mul_add_c2(a[5],b[1],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_4,$a_2); # mul_add_c2(a[4],b[2],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_3,$a_3); # mul_add_c(a[3],b[3],c1,c2,c3);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_1,$t_1
@@ -2117,112 +2019,25 @@ $code.=<<___;
sltu $at,$c_2,$t_2
$ADDU $c_3,$at
$ST $c_1,6*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_1,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_1,$a_6 # mul_add_c2(a[1],b[6],c2,c3,c1);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_1,$at
- $MULTU $a_2,$a_5 # mul_add_c2(a[2],b[5],c2,c3,c1);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_1,$at
- $MULTU $a_3,$a_4 # mul_add_c2(a[3],b[4],c2,c3,c1);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_1,$at
- $MULTU $a_7,$a_1 # mul_add_c2(a[7],b[1],c3,c1,c2);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_1,$a_6); # mul_add_c2(a[1],b[6],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_2,$a_5); # mul_add_c2(a[2],b[5],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_3,$a_4); # mul_add_c2(a[3],b[4],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_7,$a_1); # mul_add_c2(a[7],b[1],c3,c1,c2);
+$code.=<<___;
$ST $c_2,7*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_2,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_6,$a_2 # mul_add_c2(a[6],b[2],c3,c1,c2);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_2,$at
- $MULTU $a_5,$a_3 # mul_add_c2(a[5],b[3],c3,c1,c2);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_2,$at
- $MULTU $a_4,$a_4 # mul_add_c(a[4],b[4],c3,c1,c2);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_6,$a_2); # mul_add_c2(a[6],b[2],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_5,$a_3); # mul_add_c2(a[5],b[3],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_4,$a_4); # mul_add_c(a[4],b[4],c3,c1,c2);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_3,$t_1
@@ -2233,82 +2048,21 @@ $code.=<<___;
sltu $at,$c_1,$t_2
$ADDU $c_2,$at
$ST $c_3,8*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_3,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_3,$a_6 # mul_add_c2(a[3],b[6],c1,c2,c3);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_3,$at
- $MULTU $a_4,$a_5 # mul_add_c2(a[4],b[5],c1,c2,c3);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_3,$at
- $MULTU $a_7,$a_3 # mul_add_c2(a[7],b[3],c2,c3,c1);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_3,$a_6); # mul_add_c2(a[3],b[6],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_4,$a_5); # mul_add_c2(a[4],b[5],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_7,$a_3); # mul_add_c2(a[7],b[3],c2,c3,c1);
+$code.=<<___;
$ST $c_1,9*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_1,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_6,$a_4 # mul_add_c2(a[6],b[4],c2,c3,c1);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_1,$at
- $MULTU $a_5,$a_5 # mul_add_c(a[5],b[5],c2,c3,c1);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_6,$a_4); # mul_add_c2(a[6],b[4],c2,c3,c1);
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,1,
+ $a_5,$a_5); # mul_add_c(a[5],b[5],c2,c3,c1);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_2,$t_1
@@ -2319,52 +2073,17 @@ $code.=<<___;
sltu $at,$c_3,$t_2
$ADDU $c_1,$at
$ST $c_2,10*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_2,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_5,$a_6 # mul_add_c2(a[5],b[6],c3,c1,c2);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_2,$at
- $MULTU $a_7,$a_5 # mul_add_c2(a[7],b[5],c1,c2,c3);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_5,$a_6); # mul_add_c2(a[5],b[6],c3,c1,c2);
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,1,
+ $a_7,$a_5); # mul_add_c2(a[7],b[5],c1,c2,c3);
+$code.=<<___;
$ST $c_3,11*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_3,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_6,$a_6 # mul_add_c(a[6],b[6],c1,c2,c3);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_6,$a_6); # mul_add_c(a[6],b[6],c1,c2,c3);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_1,$t_1
@@ -2375,21 +2094,10 @@ $code.=<<___;
sltu $at,$c_2,$t_2
$ADDU $c_3,$at
$ST $c_1,12*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_1,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_7,$a_7 # mul_add_c(a[7],b[7],c3,c1,c2);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_7,$a_7); # mul_add_c(a[7],b[7],c3,c1,c2);
+$code.=<<___;
$ST $c_2,13*$BNSZ($a0)
mflo $t_1
@@ -2457,21 +2165,10 @@ $code.=<<___;
sltu $at,$c_2,$t_1
$ADDU $c_3,$t_2,$at
$ST $c_2,$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_2,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_1,$a_1 # mul_add_c(a[1],b[1],c3,c1,c2);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_1,$a_1); # mul_add_c(a[1],b[1],c3,c1,c2);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_3,$t_1
@@ -2482,52 +2179,17 @@ $code.=<<___;
sltu $at,$c_1,$t_2
$ADDU $c_2,$at
$ST $c_3,2*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_3,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_1,$a_2 # mul_add_c(a2[1],b[2],c1,c2,c3);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
- mflo $t_1
- mfhi $t_2
- slt $at,$t_2,$zero
- $ADDU $c_3,$at
- $MULTU $a_3,$a_1 # mul_add_c2(a[3],b[1],c2,c3,c1);
- $SLL $t_2,1
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_1,$t_1
- sltu $at,$c_1,$t_1
- $ADDU $t_2,$at
- $ADDU $c_2,$t_2
- sltu $at,$c_2,$t_2
- $ADDU $c_3,$at
+___
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,0,
+ $a_1,$a_2); # mul_add_c2(a2[1],b[2],c1,c2,c3);
+ &add_c2($t_2,$t_1,$c_1,$c_2,$c_3,1,
+ $a_3,$a_1); # mul_add_c2(a[3],b[1],c2,c3,c1);
+$code.=<<___;
$ST $c_1,3*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_1,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_2,$a_2 # mul_add_c(a[2],b[2],c2,c3,c1);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_2,$t_1
- sltu $at,$c_2,$t_1
- $ADDU $t_2,$at
- $ADDU $c_3,$t_2
- sltu $at,$c_3,$t_2
- $ADDU $c_1,$at
+___
+ &add_c2($t_2,$t_1,$c_2,$c_3,$c_1,0,
+ $a_2,$a_2); # mul_add_c(a[2],b[2],c2,c3,c1);
+$code.=<<___;
mflo $t_1
mfhi $t_2
$ADDU $c_2,$t_1
@@ -2538,21 +2200,10 @@ $code.=<<___;
sltu $at,$c_3,$t_2
$ADDU $c_1,$at
$ST $c_2,4*$BNSZ($a0)
-
- mflo $t_1
- mfhi $t_2
- slt $c_2,$t_2,$zero
- $SLL $t_2,1
- $MULTU $a_3,$a_3 # mul_add_c(a[3],b[3],c1,c2,c3);
- slt $a2,$t_1,$zero
- $ADDU $t_2,$a2
- $SLL $t_1,1
- $ADDU $c_3,$t_1
- sltu $at,$c_3,$t_1
- $ADDU $t_2,$at
- $ADDU $c_1,$t_2
- sltu $at,$c_1,$t_2
- $ADDU $c_2,$at
+___
+ &add_c2($t_2,$t_1,$c_3,$c_1,$c_2,0,
+ $a_3,$a_3); # mul_add_c(a[3],b[3],c1,c2,c3);
+$code.=<<___;
$ST $c_3,5*$BNSZ($a0)
mflo $t_1
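
Note: each block folded into add_c2() above computes
(c2,c1,c0) += 2*a*b on a three-word accumulator. The deleted code doubled
hi:lo with shifts plus sign tests (the slt/$SLL pairs), while the
replacement adds the product twice with sltu-derived carries — which cannot
lose the top bit — and issues the next $MULTU early ("forward
multiplication") to hide multiplier latency. A portable C sketch of the
same accumulation (illustrative; 32-bit words shown, the 64-bit flavor
follows the same pattern):

#include <stdint.h>

static void mul_add_c2(uint32_t a, uint32_t b,
                       uint32_t *c0, uint32_t *c1, uint32_t *c2)
{
    uint64_t p = (uint64_t)a * b;

    for (int i = 0; i < 2; i++) {          /* add hi:lo twice == 2*a*b */
        uint64_t s = (uint64_t)*c0 + (uint32_t)p;
        *c0 = (uint32_t)s;
        s = (uint64_t)*c1 + (uint32_t)(p >> 32) + (s >> 32);
        *c1 = (uint32_t)s;
        *c2 += (uint32_t)(s >> 32);        /* top word absorbs the carry */
    }
}
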
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/s390x-gf2m.pl b/drivers/builtin_openssl2/crypto/bn/asm/s390x-gf2m.pl
index cd9f13eca2..9d18d40e77 100644
--- a/drivers/builtin_openssl2/crypto/bn/asm/s390x-gf2m.pl
+++ b/drivers/builtin_openssl2/crypto/bn/asm/s390x-gf2m.pl
@@ -172,19 +172,19 @@ ___
if ($SIZE_T==8) {
my @r=map("%r$_",(6..9));
$code.=<<___;
- bras $ra,_mul_1x1 # a1·b1
+ bras $ra,_mul_1x1 # a1·b1
stmg $lo,$hi,16($rp)
lg $a,`$stdframe+128+4*$SIZE_T`($sp)
lg $b,`$stdframe+128+6*$SIZE_T`($sp)
- bras $ra,_mul_1x1 # a0·b0
+ bras $ra,_mul_1x1 # a0·b0
stmg $lo,$hi,0($rp)
lg $a,`$stdframe+128+3*$SIZE_T`($sp)
lg $b,`$stdframe+128+5*$SIZE_T`($sp)
xg $a,`$stdframe+128+4*$SIZE_T`($sp)
xg $b,`$stdframe+128+6*$SIZE_T`($sp)
- bras $ra,_mul_1x1 # (a0+a1)·(b0+b1)
+ bras $ra,_mul_1x1 # (a0+a1)·(b0+b1)
lmg @r[0],@r[3],0($rp)
xgr $lo,$hi
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/x86-gf2m.pl b/drivers/builtin_openssl2/crypto/bn/asm/x86-gf2m.pl
index 808a1e5969..b579530272 100644
--- a/drivers/builtin_openssl2/crypto/bn/asm/x86-gf2m.pl
+++ b/drivers/builtin_openssl2/crypto/bn/asm/x86-gf2m.pl
@@ -14,7 +14,7 @@
# the time being... Except that it has three code paths: pure integer
# code suitable for any x86 CPU, MMX code suitable for PIII and later
# and PCLMULQDQ suitable for Westmere and later. Improvement varies
-# from one benchmark and µ-arch to another. Below are interval values
+# from one benchmark and µ-arch to another. Below are interval values
# for 163- and 571-bit ECDH benchmarks relative to compiler-generated
# code:
#
@@ -226,22 +226,22 @@ if ($sse2) {
&push ("edi");
&mov ($a,&wparam(1));
&mov ($b,&wparam(3));
- &call ("_mul_1x1_mmx"); # a1·b1
+ &call ("_mul_1x1_mmx"); # a1·b1
&movq ("mm7",$R);
&mov ($a,&wparam(2));
&mov ($b,&wparam(4));
- &call ("_mul_1x1_mmx"); # a0·b0
+ &call ("_mul_1x1_mmx"); # a0·b0
&movq ("mm6",$R);
&mov ($a,&wparam(1));
&mov ($b,&wparam(3));
&xor ($a,&wparam(2));
&xor ($b,&wparam(4));
- &call ("_mul_1x1_mmx"); # (a0+a1)·(b0+b1)
+ &call ("_mul_1x1_mmx"); # (a0+a1)·(b0+b1)
&pxor ($R,"mm7");
&mov ($a,&wparam(0));
- &pxor ($R,"mm6"); # (a0+a1)·(b0+b1)-a1·b1-a0·b0
+ &pxor ($R,"mm6"); # (a0+a1)·(b0+b1)-a1·b1-a0·b0
&movq ($A,$R);
&psllq ($R,32);
@@ -266,13 +266,13 @@ if ($sse2) {
&mov ($a,&wparam(1));
&mov ($b,&wparam(3));
- &call ("_mul_1x1_ialu"); # a1·b1
+ &call ("_mul_1x1_ialu"); # a1·b1
&mov (&DWP(8,"esp"),$lo);
&mov (&DWP(12,"esp"),$hi);
&mov ($a,&wparam(2));
&mov ($b,&wparam(4));
- &call ("_mul_1x1_ialu"); # a0·b0
+ &call ("_mul_1x1_ialu"); # a0·b0
&mov (&DWP(0,"esp"),$lo);
&mov (&DWP(4,"esp"),$hi);
@@ -280,7 +280,7 @@ if ($sse2) {
&mov ($b,&wparam(3));
&xor ($a,&wparam(2));
&xor ($b,&wparam(4));
- &call ("_mul_1x1_ialu"); # (a0+a1)·(b0+b1)
+ &call ("_mul_1x1_ialu"); # (a0+a1)·(b0+b1)
&mov ("ebp",&wparam(0));
@r=("ebx","ecx","edi","esi");
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gcc.c b/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gcc.c
index acb0b40118..0a5bb285a1 100644
--- a/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gcc.c
+++ b/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gcc.c
@@ -1,8 +1,8 @@
#include "../bn_lcl.h"
#if !(defined(__GNUC__) && __GNUC__>=2)
-# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
+# include "../bn_asm.c" /* kind of dirty hack for Sun Studio */
#else
-/*
+/*-
* x86_64 BIGNUM accelerator version 0.1, December 2002.
*
* Implemented by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
@@ -28,579 +28,609 @@
* Q. How much faster does it get?
* A. 'apps/openssl speed rsa dsa' output with no-asm:
*
- * sign verify sign/s verify/s
- * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
- * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
- * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
- * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
- * sign verify sign/s verify/s
- * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
- * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
- * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0006s 0.0001s 1683.8 18456.2
+ * rsa 1024 bits 0.0028s 0.0002s 356.0 6407.0
+ * rsa 2048 bits 0.0172s 0.0005s 58.0 1957.8
+ * rsa 4096 bits 0.1155s 0.0018s 8.7 555.6
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0005s 0.0006s 2100.8 1768.3
+ * dsa 1024 bits 0.0014s 0.0018s 692.3 559.2
+ * dsa 2048 bits 0.0049s 0.0061s 204.7 165.0
*
* 'apps/openssl speed rsa dsa' output with this module:
*
- * sign verify sign/s verify/s
- * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
- * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
- * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
- * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
- * sign verify sign/s verify/s
- * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
- * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
- * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
+ * sign verify sign/s verify/s
+ * rsa 512 bits 0.0004s 0.0000s 2767.1 33297.9
+ * rsa 1024 bits 0.0012s 0.0001s 867.4 14674.7
+ * rsa 2048 bits 0.0061s 0.0002s 164.0 5270.0
+ * rsa 4096 bits 0.0384s 0.0006s 26.1 1650.8
+ * sign verify sign/s verify/s
+ * dsa 512 bits 0.0002s 0.0003s 4442.2 3786.3
+ * dsa 1024 bits 0.0005s 0.0007s 1835.1 1497.4
+ * dsa 2048 bits 0.0016s 0.0020s 620.4 504.6
*
* For the reference. IA-32 assembler implementation performs
* very much like 64-bit code compiled with no-asm on the same
* machine.
*/
-#ifdef _WIN64
-#define BN_ULONG unsigned long long
-#else
-#define BN_ULONG unsigned long
-#endif
+# ifdef _WIN64
+# define BN_ULONG unsigned long long
+# else
+# define BN_ULONG unsigned long
+# endif
-#undef mul
-#undef mul_add
-#undef sqr
+# undef mul
+# undef mul_add
+# undef sqr
-/*
- * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
- * "g"(0) let the compiler to decide where does it
- * want to keep the value of zero;
+/*-
+ * "m"(a), "+m"(r) is the way to favor DirectPath µ-code;
+ * "g"(0) let the compiler to decide where does it
+ * want to keep the value of zero;
*/
-#define mul_add(r,a,word,carry) do { \
- register BN_ULONG high,low; \
- asm ("mulq %3" \
- : "=a"(low),"=d"(high) \
- : "a"(word),"m"(a) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(carry),"+d"(high)\
- : "a"(low),"g"(0) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+m"(r),"+d"(high) \
- : "r"(carry),"g"(0) \
- : "cc"); \
- carry=high; \
- } while (0)
-
-#define mul(r,a,word,carry) do { \
- register BN_ULONG high,low; \
- asm ("mulq %3" \
- : "=a"(low),"=d"(high) \
- : "a"(word),"g"(a) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(carry),"+d"(high)\
- : "a"(low),"g"(0) \
- : "cc"); \
- (r)=carry, carry=high; \
- } while (0)
-
-#define sqr(r0,r1,a) \
- asm ("mulq %2" \
- : "=a"(r0),"=d"(r1) \
- : "a"(a) \
- : "cc");
-
-BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
- {
- BN_ULONG c1=0;
-
- if (num <= 0) return(c1);
-
- while (num&~3)
- {
- mul_add(rp[0],ap[0],w,c1);
- mul_add(rp[1],ap[1],w,c1);
- mul_add(rp[2],ap[2],w,c1);
- mul_add(rp[3],ap[3],w,c1);
- ap+=4; rp+=4; num-=4;
- }
- if (num)
- {
- mul_add(rp[0],ap[0],w,c1); if (--num==0) return c1;
- mul_add(rp[1],ap[1],w,c1); if (--num==0) return c1;
- mul_add(rp[2],ap[2],w,c1); return c1;
- }
-
- return(c1);
- }
+# define mul_add(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"m"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+m"(r),"+d"(high) \
+ : "r"(carry),"g"(0) \
+ : "cc"); \
+ carry=high; \
+ } while (0)
+
+# define mul(r,a,word,carry) do { \
+ register BN_ULONG high,low; \
+ asm ("mulq %3" \
+ : "=a"(low),"=d"(high) \
+ : "a"(word),"g"(a) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(carry),"+d"(high)\
+ : "a"(low),"g"(0) \
+ : "cc"); \
+ (r)=carry, carry=high; \
+ } while (0)
+
+# define sqr(r0,r1,a) \
+ asm ("mulq %2" \
+ : "=a"(r0),"=d"(r1) \
+ : "a"(a) \
+ : "cc");
+
+BN_ULONG bn_mul_add_words(BN_ULONG *rp, const BN_ULONG *ap, int num,
+ BN_ULONG w)
+{
+ BN_ULONG c1 = 0;
+
+ if (num <= 0)
+ return (c1);
+
+ while (num & ~3) {
+ mul_add(rp[0], ap[0], w, c1);
+ mul_add(rp[1], ap[1], w, c1);
+ mul_add(rp[2], ap[2], w, c1);
+ mul_add(rp[3], ap[3], w, c1);
+ ap += 4;
+ rp += 4;
+ num -= 4;
+ }
+ if (num) {
+ mul_add(rp[0], ap[0], w, c1);
+ if (--num == 0)
+ return c1;
+ mul_add(rp[1], ap[1], w, c1);
+ if (--num == 0)
+ return c1;
+ mul_add(rp[2], ap[2], w, c1);
+ return c1;
+ }
+
+ return (c1);
+}
BN_ULONG bn_mul_words(BN_ULONG *rp, const BN_ULONG *ap, int num, BN_ULONG w)
- {
- BN_ULONG c1=0;
-
- if (num <= 0) return(c1);
-
- while (num&~3)
- {
- mul(rp[0],ap[0],w,c1);
- mul(rp[1],ap[1],w,c1);
- mul(rp[2],ap[2],w,c1);
- mul(rp[3],ap[3],w,c1);
- ap+=4; rp+=4; num-=4;
- }
- if (num)
- {
- mul(rp[0],ap[0],w,c1); if (--num == 0) return c1;
- mul(rp[1],ap[1],w,c1); if (--num == 0) return c1;
- mul(rp[2],ap[2],w,c1);
- }
- return(c1);
- }
+{
+ BN_ULONG c1 = 0;
+
+ if (num <= 0)
+ return (c1);
+
+ while (num & ~3) {
+ mul(rp[0], ap[0], w, c1);
+ mul(rp[1], ap[1], w, c1);
+ mul(rp[2], ap[2], w, c1);
+ mul(rp[3], ap[3], w, c1);
+ ap += 4;
+ rp += 4;
+ num -= 4;
+ }
+ if (num) {
+ mul(rp[0], ap[0], w, c1);
+ if (--num == 0)
+ return c1;
+ mul(rp[1], ap[1], w, c1);
+ if (--num == 0)
+ return c1;
+ mul(rp[2], ap[2], w, c1);
+ }
+ return (c1);
+}
void bn_sqr_words(BN_ULONG *r, const BN_ULONG *a, int n)
- {
- if (n <= 0) return;
-
- while (n&~3)
- {
- sqr(r[0],r[1],a[0]);
- sqr(r[2],r[3],a[1]);
- sqr(r[4],r[5],a[2]);
- sqr(r[6],r[7],a[3]);
- a+=4; r+=8; n-=4;
- }
- if (n)
- {
- sqr(r[0],r[1],a[0]); if (--n == 0) return;
- sqr(r[2],r[3],a[1]); if (--n == 0) return;
- sqr(r[4],r[5],a[2]);
- }
- }
+{
+ if (n <= 0)
+ return;
+
+ while (n & ~3) {
+ sqr(r[0], r[1], a[0]);
+ sqr(r[2], r[3], a[1]);
+ sqr(r[4], r[5], a[2]);
+ sqr(r[6], r[7], a[3]);
+ a += 4;
+ r += 8;
+ n -= 4;
+ }
+ if (n) {
+ sqr(r[0], r[1], a[0]);
+ if (--n == 0)
+ return;
+ sqr(r[2], r[3], a[1]);
+ if (--n == 0)
+ return;
+ sqr(r[4], r[5], a[2]);
+ }
+}
BN_ULONG bn_div_words(BN_ULONG h, BN_ULONG l, BN_ULONG d)
-{ BN_ULONG ret,waste;
+{
+ BN_ULONG ret, waste;
- asm ("divq %4"
- : "=a"(ret),"=d"(waste)
- : "a"(l),"d"(h),"g"(d)
- : "cc");
+ asm("divq %4":"=a"(ret), "=d"(waste)
+ : "a"(l), "d"(h), "g"(d)
+ : "cc");
- return ret;
+ return ret;
}
-BN_ULONG bn_add_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n)
-{ BN_ULONG ret=0,i=0;
-
- if (n <= 0) return 0;
-
- asm (
- " subq %2,%2 \n"
- ".p2align 4 \n"
- "1: movq (%4,%2,8),%0 \n"
- " adcq (%5,%2,8),%0 \n"
- " movq %0,(%3,%2,8) \n"
- " leaq 1(%2),%2 \n"
- " loop 1b \n"
- " sbbq %0,%0 \n"
- : "=&a"(ret),"+c"(n),"=&r"(i)
- : "r"(rp),"r"(ap),"r"(bp)
- : "cc"
- );
-
- return ret&1;
+BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+ int n)
+{
+ BN_ULONG ret = 0, i = 0;
+
+ if (n <= 0)
+ return 0;
+
+ asm volatile (" subq %2,%2 \n"
+ ".p2align 4 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " adcq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " leaq 1(%2),%2 \n"
+ " loop 1b \n"
+ " sbbq %0,%0 \n":"=&a" (ret), "+c"(n),
+ "=&r"(i)
+ :"r"(rp), "r"(ap), "r"(bp)
+ :"cc", "memory");
+
+ return ret & 1;
}
-#ifndef SIMICS
-BN_ULONG bn_sub_words (BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,int n)
-{ BN_ULONG ret=0,i=0;
-
- if (n <= 0) return 0;
-
- asm (
- " subq %2,%2 \n"
- ".p2align 4 \n"
- "1: movq (%4,%2,8),%0 \n"
- " sbbq (%5,%2,8),%0 \n"
- " movq %0,(%3,%2,8) \n"
- " leaq 1(%2),%2 \n"
- " loop 1b \n"
- " sbbq %0,%0 \n"
- : "=&a"(ret),"+c"(n),"=&r"(i)
- : "r"(rp),"r"(ap),"r"(bp)
- : "cc"
- );
-
- return ret&1;
+# ifndef SIMICS
+BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
+ int n)
+{
+ BN_ULONG ret = 0, i = 0;
+
+ if (n <= 0)
+ return 0;
+
+ asm volatile (" subq %2,%2 \n"
+ ".p2align 4 \n"
+ "1: movq (%4,%2,8),%0 \n"
+ " sbbq (%5,%2,8),%0 \n"
+ " movq %0,(%3,%2,8) \n"
+ " leaq 1(%2),%2 \n"
+ " loop 1b \n"
+ " sbbq %0,%0 \n":"=&a" (ret), "+c"(n),
+ "=&r"(i)
+ :"r"(rp), "r"(ap), "r"(bp)
+ :"cc", "memory");
+
+ return ret & 1;
}
-#else
+# else
/* Simics 1.4<7 has buggy sbbq:-( */
-#define BN_MASK2 0xffffffffffffffffL
+# define BN_MASK2 0xffffffffffffffffL
BN_ULONG bn_sub_words(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b, int n)
- {
- BN_ULONG t1,t2;
- int c=0;
-
- if (n <= 0) return((BN_ULONG)0);
-
- for (;;)
- {
- t1=a[0]; t2=b[0];
- r[0]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
- t1=a[1]; t2=b[1];
- r[1]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
- t1=a[2]; t2=b[2];
- r[2]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
- t1=a[3]; t2=b[3];
- r[3]=(t1-t2-c)&BN_MASK2;
- if (t1 != t2) c=(t1 < t2);
- if (--n <= 0) break;
-
- a+=4;
- b+=4;
- r+=4;
- }
- return(c);
- }
-#endif
+{
+ BN_ULONG t1, t2;
+ int c = 0;
+
+ if (n <= 0)
+ return ((BN_ULONG)0);
+
+ for (;;) {
+ t1 = a[0];
+ t2 = b[0];
+ r[0] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ t1 = a[1];
+ t2 = b[1];
+ r[1] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ t1 = a[2];
+ t2 = b[2];
+ r[2] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ t1 = a[3];
+ t2 = b[3];
+ r[3] = (t1 - t2 - c) & BN_MASK2;
+ if (t1 != t2)
+ c = (t1 < t2);
+ if (--n <= 0)
+ break;
+
+ a += 4;
+ b += 4;
+ r += 4;
+ }
+ return (c);
+}
+# endif
/* mul_add_c(a,b,c0,c1,c2) -- c+=a*b for three word number c=(c2,c1,c0) */
/* mul_add_c2(a,b,c0,c1,c2) -- c+=2*a*b for three word number c=(c2,c1,c0) */
/* sqr_add_c(a,i,c0,c1,c2) -- c+=a[i]^2 for three word number c=(c2,c1,c0) */
-/* sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number c=(c2,c1,c0) */
+/*
+ * sqr_add_c2(a,i,c0,c1,c2) -- c+=2*a[i]*a[j] for three word number
+ * c=(c2,c1,c0)
+ */
-#if 0
+/*
+ * Keep in mind that carrying into high part of multiplication result
+ * can not overflow, because it cannot be all-ones.
+ */
+# if 0
/* original macros are kept for reference purposes */
-#define mul_add_c(a,b,c0,c1,c2) { \
- BN_ULONG ta=(a),tb=(b); \
- t1 = ta * tb; \
- t2 = BN_UMULT_HIGH(ta,tb); \
- c0 += t1; t2 += (c0<t1)?1:0; \
- c1 += t2; c2 += (c1<t2)?1:0; \
- }
-
-#define mul_add_c2(a,b,c0,c1,c2) { \
- BN_ULONG ta=(a),tb=(b),t0; \
- t1 = BN_UMULT_HIGH(ta,tb); \
- t0 = ta * tb; \
- t2 = t1+t1; c2 += (t2<t1)?1:0; \
- t1 = t0+t0; t2 += (t1<t0)?1:0; \
- c0 += t1; t2 += (c0<t1)?1:0; \
- c1 += t2; c2 += (c1<t2)?1:0; \
- }
-#else
-#define mul_add_c(a,b,c0,c1,c2) do { \
- asm ("mulq %3" \
- : "=a"(t1),"=d"(t2) \
- : "a"(a),"m"(b) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(c0),"+d"(t2) \
- : "a"(t1),"g"(0) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(c1),"+r"(c2) \
- : "d"(t2),"g"(0) \
- : "cc"); \
- } while (0)
-
-#define sqr_add_c(a,i,c0,c1,c2) do { \
- asm ("mulq %2" \
- : "=a"(t1),"=d"(t2) \
- : "a"(a[i]) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(c0),"+d"(t2) \
- : "a"(t1),"g"(0) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(c1),"+r"(c2) \
- : "d"(t2),"g"(0) \
- : "cc"); \
- } while (0)
-
-#define mul_add_c2(a,b,c0,c1,c2) do { \
- asm ("mulq %3" \
- : "=a"(t1),"=d"(t2) \
- : "a"(a),"m"(b) \
- : "cc"); \
- asm ("addq %0,%0; adcq %2,%1" \
- : "+d"(t2),"+r"(c2) \
- : "g"(0) \
- : "cc"); \
- asm ("addq %0,%0; adcq %2,%1" \
- : "+a"(t1),"+d"(t2) \
- : "g"(0) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(c0),"+d"(t2) \
- : "a"(t1),"g"(0) \
- : "cc"); \
- asm ("addq %2,%0; adcq %3,%1" \
- : "+r"(c1),"+r"(c2) \
- : "d"(t2),"g"(0) \
- : "cc"); \
- } while (0)
-#endif
-
-#define sqr_add_c2(a,i,j,c0,c1,c2) \
- mul_add_c2((a)[i],(a)[j],c0,c1,c2)
+# define mul_add_c(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b); \
+ t1 = ta * tb; \
+ t2 = BN_UMULT_HIGH(ta,tb); \
+ c0 += t1; t2 += (c0<t1)?1:0; \
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ }
+
+# define mul_add_c2(a,b,c0,c1,c2) { \
+ BN_ULONG ta=(a),tb=(b),t0; \
+ t1 = BN_UMULT_HIGH(ta,tb); \
+ t0 = ta * tb; \
+ c0 += t0; t2 = t1+((c0<t0)?1:0);\
+ c1 += t2; c2 += (c1<t2)?1:0; \
+ c0 += t0; t1 += (c0<t0)?1:0; \
+ c1 += t1; c2 += (c1<t1)?1:0; \
+ }
+# else
+# define mul_add_c(a,b,c0,c1,c2) do { \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+# define sqr_add_c(a,i,c0,c1,c2) do { \
+ asm ("mulq %2" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a[i]) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c0),"+d"(t2) \
+ : "a"(t1),"g"(0) \
+ : "cc"); \
+ asm ("addq %2,%0; adcq %3,%1" \
+ : "+r"(c1),"+r"(c2) \
+ : "d"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+
+# define mul_add_c2(a,b,c0,c1,c2) do { \
+ asm ("mulq %3" \
+ : "=a"(t1),"=d"(t2) \
+ : "a"(a),"m"(b) \
+ : "cc"); \
+ asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0),"+r"(c1),"+r"(c2) \
+ : "r"(t1),"r"(t2),"g"(0) \
+ : "cc"); \
+ asm ("addq %3,%0; adcq %4,%1; adcq %5,%2" \
+ : "+r"(c0),"+r"(c1),"+r"(c2) \
+ : "r"(t1),"r"(t2),"g"(0) \
+ : "cc"); \
+ } while (0)
+# endif
+
+# define sqr_add_c2(a,i,j,c0,c1,c2) \
+ mul_add_c2((a)[i],(a)[j],c0,c1,c2)
void bn_mul_comba8(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
- {
- BN_ULONG t1,t2;
- BN_ULONG c1,c2,c3;
-
- c1=0;
- c2=0;
- c3=0;
- mul_add_c(a[0],b[0],c1,c2,c3);
- r[0]=c1;
- c1=0;
- mul_add_c(a[0],b[1],c2,c3,c1);
- mul_add_c(a[1],b[0],c2,c3,c1);
- r[1]=c2;
- c2=0;
- mul_add_c(a[2],b[0],c3,c1,c2);
- mul_add_c(a[1],b[1],c3,c1,c2);
- mul_add_c(a[0],b[2],c3,c1,c2);
- r[2]=c3;
- c3=0;
- mul_add_c(a[0],b[3],c1,c2,c3);
- mul_add_c(a[1],b[2],c1,c2,c3);
- mul_add_c(a[2],b[1],c1,c2,c3);
- mul_add_c(a[3],b[0],c1,c2,c3);
- r[3]=c1;
- c1=0;
- mul_add_c(a[4],b[0],c2,c3,c1);
- mul_add_c(a[3],b[1],c2,c3,c1);
- mul_add_c(a[2],b[2],c2,c3,c1);
- mul_add_c(a[1],b[3],c2,c3,c1);
- mul_add_c(a[0],b[4],c2,c3,c1);
- r[4]=c2;
- c2=0;
- mul_add_c(a[0],b[5],c3,c1,c2);
- mul_add_c(a[1],b[4],c3,c1,c2);
- mul_add_c(a[2],b[3],c3,c1,c2);
- mul_add_c(a[3],b[2],c3,c1,c2);
- mul_add_c(a[4],b[1],c3,c1,c2);
- mul_add_c(a[5],b[0],c3,c1,c2);
- r[5]=c3;
- c3=0;
- mul_add_c(a[6],b[0],c1,c2,c3);
- mul_add_c(a[5],b[1],c1,c2,c3);
- mul_add_c(a[4],b[2],c1,c2,c3);
- mul_add_c(a[3],b[3],c1,c2,c3);
- mul_add_c(a[2],b[4],c1,c2,c3);
- mul_add_c(a[1],b[5],c1,c2,c3);
- mul_add_c(a[0],b[6],c1,c2,c3);
- r[6]=c1;
- c1=0;
- mul_add_c(a[0],b[7],c2,c3,c1);
- mul_add_c(a[1],b[6],c2,c3,c1);
- mul_add_c(a[2],b[5],c2,c3,c1);
- mul_add_c(a[3],b[4],c2,c3,c1);
- mul_add_c(a[4],b[3],c2,c3,c1);
- mul_add_c(a[5],b[2],c2,c3,c1);
- mul_add_c(a[6],b[1],c2,c3,c1);
- mul_add_c(a[7],b[0],c2,c3,c1);
- r[7]=c2;
- c2=0;
- mul_add_c(a[7],b[1],c3,c1,c2);
- mul_add_c(a[6],b[2],c3,c1,c2);
- mul_add_c(a[5],b[3],c3,c1,c2);
- mul_add_c(a[4],b[4],c3,c1,c2);
- mul_add_c(a[3],b[5],c3,c1,c2);
- mul_add_c(a[2],b[6],c3,c1,c2);
- mul_add_c(a[1],b[7],c3,c1,c2);
- r[8]=c3;
- c3=0;
- mul_add_c(a[2],b[7],c1,c2,c3);
- mul_add_c(a[3],b[6],c1,c2,c3);
- mul_add_c(a[4],b[5],c1,c2,c3);
- mul_add_c(a[5],b[4],c1,c2,c3);
- mul_add_c(a[6],b[3],c1,c2,c3);
- mul_add_c(a[7],b[2],c1,c2,c3);
- r[9]=c1;
- c1=0;
- mul_add_c(a[7],b[3],c2,c3,c1);
- mul_add_c(a[6],b[4],c2,c3,c1);
- mul_add_c(a[5],b[5],c2,c3,c1);
- mul_add_c(a[4],b[6],c2,c3,c1);
- mul_add_c(a[3],b[7],c2,c3,c1);
- r[10]=c2;
- c2=0;
- mul_add_c(a[4],b[7],c3,c1,c2);
- mul_add_c(a[5],b[6],c3,c1,c2);
- mul_add_c(a[6],b[5],c3,c1,c2);
- mul_add_c(a[7],b[4],c3,c1,c2);
- r[11]=c3;
- c3=0;
- mul_add_c(a[7],b[5],c1,c2,c3);
- mul_add_c(a[6],b[6],c1,c2,c3);
- mul_add_c(a[5],b[7],c1,c2,c3);
- r[12]=c1;
- c1=0;
- mul_add_c(a[6],b[7],c2,c3,c1);
- mul_add_c(a[7],b[6],c2,c3,c1);
- r[13]=c2;
- c2=0;
- mul_add_c(a[7],b[7],c3,c1,c2);
- r[14]=c3;
- r[15]=c1;
- }
+{
+ BN_ULONG t1, t2;
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ mul_add_c(a[0], b[0], c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ mul_add_c(a[0], b[1], c2, c3, c1);
+ mul_add_c(a[1], b[0], c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ mul_add_c(a[2], b[0], c3, c1, c2);
+ mul_add_c(a[1], b[1], c3, c1, c2);
+ mul_add_c(a[0], b[2], c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ mul_add_c(a[0], b[3], c1, c2, c3);
+ mul_add_c(a[1], b[2], c1, c2, c3);
+ mul_add_c(a[2], b[1], c1, c2, c3);
+ mul_add_c(a[3], b[0], c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ mul_add_c(a[4], b[0], c2, c3, c1);
+ mul_add_c(a[3], b[1], c2, c3, c1);
+ mul_add_c(a[2], b[2], c2, c3, c1);
+ mul_add_c(a[1], b[3], c2, c3, c1);
+ mul_add_c(a[0], b[4], c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ mul_add_c(a[0], b[5], c3, c1, c2);
+ mul_add_c(a[1], b[4], c3, c1, c2);
+ mul_add_c(a[2], b[3], c3, c1, c2);
+ mul_add_c(a[3], b[2], c3, c1, c2);
+ mul_add_c(a[4], b[1], c3, c1, c2);
+ mul_add_c(a[5], b[0], c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ mul_add_c(a[6], b[0], c1, c2, c3);
+ mul_add_c(a[5], b[1], c1, c2, c3);
+ mul_add_c(a[4], b[2], c1, c2, c3);
+ mul_add_c(a[3], b[3], c1, c2, c3);
+ mul_add_c(a[2], b[4], c1, c2, c3);
+ mul_add_c(a[1], b[5], c1, c2, c3);
+ mul_add_c(a[0], b[6], c1, c2, c3);
+ r[6] = c1;
+ c1 = 0;
+ mul_add_c(a[0], b[7], c2, c3, c1);
+ mul_add_c(a[1], b[6], c2, c3, c1);
+ mul_add_c(a[2], b[5], c2, c3, c1);
+ mul_add_c(a[3], b[4], c2, c3, c1);
+ mul_add_c(a[4], b[3], c2, c3, c1);
+ mul_add_c(a[5], b[2], c2, c3, c1);
+ mul_add_c(a[6], b[1], c2, c3, c1);
+ mul_add_c(a[7], b[0], c2, c3, c1);
+ r[7] = c2;
+ c2 = 0;
+ mul_add_c(a[7], b[1], c3, c1, c2);
+ mul_add_c(a[6], b[2], c3, c1, c2);
+ mul_add_c(a[5], b[3], c3, c1, c2);
+ mul_add_c(a[4], b[4], c3, c1, c2);
+ mul_add_c(a[3], b[5], c3, c1, c2);
+ mul_add_c(a[2], b[6], c3, c1, c2);
+ mul_add_c(a[1], b[7], c3, c1, c2);
+ r[8] = c3;
+ c3 = 0;
+ mul_add_c(a[2], b[7], c1, c2, c3);
+ mul_add_c(a[3], b[6], c1, c2, c3);
+ mul_add_c(a[4], b[5], c1, c2, c3);
+ mul_add_c(a[5], b[4], c1, c2, c3);
+ mul_add_c(a[6], b[3], c1, c2, c3);
+ mul_add_c(a[7], b[2], c1, c2, c3);
+ r[9] = c1;
+ c1 = 0;
+ mul_add_c(a[7], b[3], c2, c3, c1);
+ mul_add_c(a[6], b[4], c2, c3, c1);
+ mul_add_c(a[5], b[5], c2, c3, c1);
+ mul_add_c(a[4], b[6], c2, c3, c1);
+ mul_add_c(a[3], b[7], c2, c3, c1);
+ r[10] = c2;
+ c2 = 0;
+ mul_add_c(a[4], b[7], c3, c1, c2);
+ mul_add_c(a[5], b[6], c3, c1, c2);
+ mul_add_c(a[6], b[5], c3, c1, c2);
+ mul_add_c(a[7], b[4], c3, c1, c2);
+ r[11] = c3;
+ c3 = 0;
+ mul_add_c(a[7], b[5], c1, c2, c3);
+ mul_add_c(a[6], b[6], c1, c2, c3);
+ mul_add_c(a[5], b[7], c1, c2, c3);
+ r[12] = c1;
+ c1 = 0;
+ mul_add_c(a[6], b[7], c2, c3, c1);
+ mul_add_c(a[7], b[6], c2, c3, c1);
+ r[13] = c2;
+ c2 = 0;
+ mul_add_c(a[7], b[7], c3, c1, c2);
+ r[14] = c3;
+ r[15] = c1;
+}
void bn_mul_comba4(BN_ULONG *r, BN_ULONG *a, BN_ULONG *b)
- {
- BN_ULONG t1,t2;
- BN_ULONG c1,c2,c3;
-
- c1=0;
- c2=0;
- c3=0;
- mul_add_c(a[0],b[0],c1,c2,c3);
- r[0]=c1;
- c1=0;
- mul_add_c(a[0],b[1],c2,c3,c1);
- mul_add_c(a[1],b[0],c2,c3,c1);
- r[1]=c2;
- c2=0;
- mul_add_c(a[2],b[0],c3,c1,c2);
- mul_add_c(a[1],b[1],c3,c1,c2);
- mul_add_c(a[0],b[2],c3,c1,c2);
- r[2]=c3;
- c3=0;
- mul_add_c(a[0],b[3],c1,c2,c3);
- mul_add_c(a[1],b[2],c1,c2,c3);
- mul_add_c(a[2],b[1],c1,c2,c3);
- mul_add_c(a[3],b[0],c1,c2,c3);
- r[3]=c1;
- c1=0;
- mul_add_c(a[3],b[1],c2,c3,c1);
- mul_add_c(a[2],b[2],c2,c3,c1);
- mul_add_c(a[1],b[3],c2,c3,c1);
- r[4]=c2;
- c2=0;
- mul_add_c(a[2],b[3],c3,c1,c2);
- mul_add_c(a[3],b[2],c3,c1,c2);
- r[5]=c3;
- c3=0;
- mul_add_c(a[3],b[3],c1,c2,c3);
- r[6]=c1;
- r[7]=c2;
- }
+{
+ BN_ULONG t1, t2;
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ mul_add_c(a[0], b[0], c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ mul_add_c(a[0], b[1], c2, c3, c1);
+ mul_add_c(a[1], b[0], c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ mul_add_c(a[2], b[0], c3, c1, c2);
+ mul_add_c(a[1], b[1], c3, c1, c2);
+ mul_add_c(a[0], b[2], c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ mul_add_c(a[0], b[3], c1, c2, c3);
+ mul_add_c(a[1], b[2], c1, c2, c3);
+ mul_add_c(a[2], b[1], c1, c2, c3);
+ mul_add_c(a[3], b[0], c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ mul_add_c(a[3], b[1], c2, c3, c1);
+ mul_add_c(a[2], b[2], c2, c3, c1);
+ mul_add_c(a[1], b[3], c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ mul_add_c(a[2], b[3], c3, c1, c2);
+ mul_add_c(a[3], b[2], c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ mul_add_c(a[3], b[3], c1, c2, c3);
+ r[6] = c1;
+ r[7] = c2;
+}
void bn_sqr_comba8(BN_ULONG *r, const BN_ULONG *a)
- {
- BN_ULONG t1,t2;
- BN_ULONG c1,c2,c3;
-
- c1=0;
- c2=0;
- c3=0;
- sqr_add_c(a,0,c1,c2,c3);
- r[0]=c1;
- c1=0;
- sqr_add_c2(a,1,0,c2,c3,c1);
- r[1]=c2;
- c2=0;
- sqr_add_c(a,1,c3,c1,c2);
- sqr_add_c2(a,2,0,c3,c1,c2);
- r[2]=c3;
- c3=0;
- sqr_add_c2(a,3,0,c1,c2,c3);
- sqr_add_c2(a,2,1,c1,c2,c3);
- r[3]=c1;
- c1=0;
- sqr_add_c(a,2,c2,c3,c1);
- sqr_add_c2(a,3,1,c2,c3,c1);
- sqr_add_c2(a,4,0,c2,c3,c1);
- r[4]=c2;
- c2=0;
- sqr_add_c2(a,5,0,c3,c1,c2);
- sqr_add_c2(a,4,1,c3,c1,c2);
- sqr_add_c2(a,3,2,c3,c1,c2);
- r[5]=c3;
- c3=0;
- sqr_add_c(a,3,c1,c2,c3);
- sqr_add_c2(a,4,2,c1,c2,c3);
- sqr_add_c2(a,5,1,c1,c2,c3);
- sqr_add_c2(a,6,0,c1,c2,c3);
- r[6]=c1;
- c1=0;
- sqr_add_c2(a,7,0,c2,c3,c1);
- sqr_add_c2(a,6,1,c2,c3,c1);
- sqr_add_c2(a,5,2,c2,c3,c1);
- sqr_add_c2(a,4,3,c2,c3,c1);
- r[7]=c2;
- c2=0;
- sqr_add_c(a,4,c3,c1,c2);
- sqr_add_c2(a,5,3,c3,c1,c2);
- sqr_add_c2(a,6,2,c3,c1,c2);
- sqr_add_c2(a,7,1,c3,c1,c2);
- r[8]=c3;
- c3=0;
- sqr_add_c2(a,7,2,c1,c2,c3);
- sqr_add_c2(a,6,3,c1,c2,c3);
- sqr_add_c2(a,5,4,c1,c2,c3);
- r[9]=c1;
- c1=0;
- sqr_add_c(a,5,c2,c3,c1);
- sqr_add_c2(a,6,4,c2,c3,c1);
- sqr_add_c2(a,7,3,c2,c3,c1);
- r[10]=c2;
- c2=0;
- sqr_add_c2(a,7,4,c3,c1,c2);
- sqr_add_c2(a,6,5,c3,c1,c2);
- r[11]=c3;
- c3=0;
- sqr_add_c(a,6,c1,c2,c3);
- sqr_add_c2(a,7,5,c1,c2,c3);
- r[12]=c1;
- c1=0;
- sqr_add_c2(a,7,6,c2,c3,c1);
- r[13]=c2;
- c2=0;
- sqr_add_c(a,7,c3,c1,c2);
- r[14]=c3;
- r[15]=c1;
- }
+{
+ BN_ULONG t1, t2;
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ sqr_add_c(a, 0, c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 1, 0, c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ sqr_add_c(a, 1, c3, c1, c2);
+ sqr_add_c2(a, 2, 0, c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ sqr_add_c2(a, 3, 0, c1, c2, c3);
+ sqr_add_c2(a, 2, 1, c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ sqr_add_c(a, 2, c2, c3, c1);
+ sqr_add_c2(a, 3, 1, c2, c3, c1);
+ sqr_add_c2(a, 4, 0, c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ sqr_add_c2(a, 5, 0, c3, c1, c2);
+ sqr_add_c2(a, 4, 1, c3, c1, c2);
+ sqr_add_c2(a, 3, 2, c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ sqr_add_c(a, 3, c1, c2, c3);
+ sqr_add_c2(a, 4, 2, c1, c2, c3);
+ sqr_add_c2(a, 5, 1, c1, c2, c3);
+ sqr_add_c2(a, 6, 0, c1, c2, c3);
+ r[6] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 7, 0, c2, c3, c1);
+ sqr_add_c2(a, 6, 1, c2, c3, c1);
+ sqr_add_c2(a, 5, 2, c2, c3, c1);
+ sqr_add_c2(a, 4, 3, c2, c3, c1);
+ r[7] = c2;
+ c2 = 0;
+ sqr_add_c(a, 4, c3, c1, c2);
+ sqr_add_c2(a, 5, 3, c3, c1, c2);
+ sqr_add_c2(a, 6, 2, c3, c1, c2);
+ sqr_add_c2(a, 7, 1, c3, c1, c2);
+ r[8] = c3;
+ c3 = 0;
+ sqr_add_c2(a, 7, 2, c1, c2, c3);
+ sqr_add_c2(a, 6, 3, c1, c2, c3);
+ sqr_add_c2(a, 5, 4, c1, c2, c3);
+ r[9] = c1;
+ c1 = 0;
+ sqr_add_c(a, 5, c2, c3, c1);
+ sqr_add_c2(a, 6, 4, c2, c3, c1);
+ sqr_add_c2(a, 7, 3, c2, c3, c1);
+ r[10] = c2;
+ c2 = 0;
+ sqr_add_c2(a, 7, 4, c3, c1, c2);
+ sqr_add_c2(a, 6, 5, c3, c1, c2);
+ r[11] = c3;
+ c3 = 0;
+ sqr_add_c(a, 6, c1, c2, c3);
+ sqr_add_c2(a, 7, 5, c1, c2, c3);
+ r[12] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 7, 6, c2, c3, c1);
+ r[13] = c2;
+ c2 = 0;
+ sqr_add_c(a, 7, c3, c1, c2);
+ r[14] = c3;
+ r[15] = c1;
+}
void bn_sqr_comba4(BN_ULONG *r, const BN_ULONG *a)
- {
- BN_ULONG t1,t2;
- BN_ULONG c1,c2,c3;
-
- c1=0;
- c2=0;
- c3=0;
- sqr_add_c(a,0,c1,c2,c3);
- r[0]=c1;
- c1=0;
- sqr_add_c2(a,1,0,c2,c3,c1);
- r[1]=c2;
- c2=0;
- sqr_add_c(a,1,c3,c1,c2);
- sqr_add_c2(a,2,0,c3,c1,c2);
- r[2]=c3;
- c3=0;
- sqr_add_c2(a,3,0,c1,c2,c3);
- sqr_add_c2(a,2,1,c1,c2,c3);
- r[3]=c1;
- c1=0;
- sqr_add_c(a,2,c2,c3,c1);
- sqr_add_c2(a,3,1,c2,c3,c1);
- r[4]=c2;
- c2=0;
- sqr_add_c2(a,3,2,c3,c1,c2);
- r[5]=c3;
- c3=0;
- sqr_add_c(a,3,c1,c2,c3);
- r[6]=c1;
- r[7]=c2;
- }
+{
+ BN_ULONG t1, t2;
+ BN_ULONG c1, c2, c3;
+
+ c1 = 0;
+ c2 = 0;
+ c3 = 0;
+ sqr_add_c(a, 0, c1, c2, c3);
+ r[0] = c1;
+ c1 = 0;
+ sqr_add_c2(a, 1, 0, c2, c3, c1);
+ r[1] = c2;
+ c2 = 0;
+ sqr_add_c(a, 1, c3, c1, c2);
+ sqr_add_c2(a, 2, 0, c3, c1, c2);
+ r[2] = c3;
+ c3 = 0;
+ sqr_add_c2(a, 3, 0, c1, c2, c3);
+ sqr_add_c2(a, 2, 1, c1, c2, c3);
+ r[3] = c1;
+ c1 = 0;
+ sqr_add_c(a, 2, c2, c3, c1);
+ sqr_add_c2(a, 3, 1, c2, c3, c1);
+ r[4] = c2;
+ c2 = 0;
+ sqr_add_c2(a, 3, 2, c3, c1, c2);
+ r[5] = c3;
+ c3 = 0;
+ sqr_add_c(a, 3, c1, c2, c3);
+ r[6] = c1;
+ r[7] = c2;
+}
#endif
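
Note: beyond the indentation sweep, the substantive change here is the
portable mul_add_c2() branch: instead of doubling t1:t0 with shifts it now
adds the product twice, relying on the new comment's observation that the
high word of a 64x64 product is at most 2^64-2 ("cannot be all-ones"), so
adding a carry into it cannot wrap. A standalone restatement using the
GCC/Clang __uint128_t extension (consistent with the file's __GNUC__ guard;
illustrative, not the file's macro):

#include <stdint.h>

typedef uint64_t BN_ULONG;

/* (c2,c1,c0) += 2*a*b for a three-word accumulator. */
static void mul_add_c2(BN_ULONG a, BN_ULONG b,
                       BN_ULONG *c0, BN_ULONG *c1, BN_ULONG *c2)
{
    __uint128_t p = (__uint128_t)a * b;
    BN_ULONG t0 = (BN_ULONG)p, t1 = (BN_ULONG)(p >> 64);

    for (int i = 0; i < 2; i++) {
        BN_ULONG t2;
        *c0 += t0;
        t2 = t1 + (*c0 < t0);   /* t1 <= 2^64-2, so this cannot wrap */
        *c1 += t2;
        *c2 += (*c1 < t2);
    }
}
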
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gf2m.pl b/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gf2m.pl
index 226c66c35e..42bbec2fb7 100644
--- a/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gf2m.pl
+++ b/drivers/builtin_openssl2/crypto/bn/asm/x86_64-gf2m.pl
@@ -13,7 +13,7 @@
# in bn_gf2m.c. It's kind of low-hanging mechanical port from C for
# the time being... Except that it has two code paths: code suitable
# for any x86_64 CPU and PCLMULQDQ one suitable for Westmere and
-# later. Improvement varies from one benchmark and µ-arch to another.
+# later. Improvement varies from one benchmark and µ-arch to another.
# Vanilla code path is at most 20% faster than compiler-generated code
# [not very impressive], while PCLMULQDQ - whole 85%-160% better on
# 163- and 571-bit ECDH benchmarks on Intel CPUs. Keep in mind that
@@ -184,13 +184,13 @@ ___
$code.=<<___;
movdqa %xmm0,%xmm4
movdqa %xmm1,%xmm5
- pclmulqdq \$0,%xmm1,%xmm0 # a1·b1
+ pclmulqdq \$0,%xmm1,%xmm0 # a1·b1
pxor %xmm2,%xmm4
pxor %xmm3,%xmm5
- pclmulqdq \$0,%xmm3,%xmm2 # a0·b0
- pclmulqdq \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1)
+ pclmulqdq \$0,%xmm3,%xmm2 # a0·b0
+ pclmulqdq \$0,%xmm5,%xmm4 # (a0+a1)·(b0+b1)
xorps %xmm0,%xmm4
- xorps %xmm2,%xmm4 # (a0+a1)·(b0+b1)-a0·b0-a1·b1
+ xorps %xmm2,%xmm4 # (a0+a1)·(b0+b1)-a0·b0-a1·b1
movdqa %xmm4,%xmm5
pslldq \$8,%xmm4
psrldq \$8,%xmm5
@@ -225,13 +225,13 @@ $code.=<<___;
mov \$0xf,$mask
mov $a1,$a
mov $b1,$b
- call _mul_1x1 # a1·b1
+ call _mul_1x1 # a1·b1
mov $lo,16(%rsp)
mov $hi,24(%rsp)
mov 48(%rsp),$a
mov 64(%rsp),$b
- call _mul_1x1 # a0·b0
+ call _mul_1x1 # a0·b0
mov $lo,0(%rsp)
mov $hi,8(%rsp)
@@ -239,7 +239,7 @@ $code.=<<___;
mov 56(%rsp),$b
xor 48(%rsp),$a
xor 64(%rsp),$b
- call _mul_1x1 # (a0+a1)·(b0+b1)
+ call _mul_1x1 # (a0+a1)·(b0+b1)
___
@r=("%rbx","%rcx","%rdi","%rsi");
$code.=<<___;
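
Note: the pclmulqdq path performs the same three-multiplication Karatsuba
as the armv4/s390x/x86 files — a1·b1, a0·b0 and (a0+a1)·(b0+b1), then XORs
the first two into the third to recover the middle 128 bits. Roughly the
same sequence with compiler intrinsics (illustrative, needs -mpclmul; not
code from this tree):

#include <stdint.h>
#include <emmintrin.h>
#include <wmmintrin.h>   /* _mm_clmulepi64_si128 */

/* r[3..0] = a1a0 * b1b0 over GF(2), 64-bit words. */
static void gf2m_mul_2x2_clmul(uint64_t r[4], uint64_t a1, uint64_t a0,
                               uint64_t b1, uint64_t b0)
{
    __m128i a  = _mm_set_epi64x((long long)a1, (long long)a0);
    __m128i b  = _mm_set_epi64x((long long)b1, (long long)b0);
    __m128i hh = _mm_clmulepi64_si128(a, b, 0x11);   /* a1*b1 */
    __m128i ll = _mm_clmulepi64_si128(a, b, 0x00);   /* a0*b0 */
    /* fold high lanes down so (a0^a1),(b0^b1) sit in the low qwords */
    __m128i af = _mm_xor_si128(a, _mm_srli_si128(a, 8));
    __m128i bf = _mm_xor_si128(b, _mm_srli_si128(b, 8));
    __m128i mm = _mm_clmulepi64_si128(af, bf, 0x00); /* (a0+a1)*(b0+b1) */

    mm = _mm_xor_si128(mm, _mm_xor_si128(hh, ll));   /* middle 128 bits */
    ll = _mm_xor_si128(ll, _mm_slli_si128(mm, 8));   /* ^= mm << 64 */
    hh = _mm_xor_si128(hh, _mm_srli_si128(mm, 8));   /* ^= mm >> 64 */
    _mm_storeu_si128((__m128i *)&r[0], ll);
    _mm_storeu_si128((__m128i *)&r[2], hh);
}
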
diff --git a/drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont5.pl b/drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont5.pl
index dae0fe2453..235979181f 100755
--- a/drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont5.pl
+++ b/drivers/builtin_openssl2/crypto/bn/asm/x86_64-mont5.pl
@@ -66,60 +66,113 @@ bn_mul_mont_gather5:
.align 16
.Lmul_enter:
mov ${num}d,${num}d
- mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
+ lea .Linc(%rip),%r10
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
-___
-$code.=<<___ if ($win64);
- lea -0x28(%rsp),%rsp
- movaps %xmm6,(%rsp)
- movaps %xmm7,0x10(%rsp)
+
.Lmul_alloca:
-___
-$code.=<<___;
mov %rsp,%rax
lea 2($num),%r11
neg %r11
- lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+2))
+ lea -264(%rsp,%r11,8),%rsp # tp=alloca(8*(num+2)+256+8)
and \$-1024,%rsp # minimize TLB usage
mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
.Lmul_body:
- mov $bp,%r12 # reassign $bp
+ lea 128($bp),%r12 # reassign $bp (+size optimization)
___
$bp="%r12";
$STRIDE=2**5*8; # 5 is "window size"
$N=$STRIDE/4; # should match cache line size
$code.=<<___;
- mov %r10,%r11
- shr \$`log($N/8)/log(2)`,%r10
- and \$`$N/8-1`,%r11
- not %r10
- lea .Lmagic_masks(%rip),%rax
- and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
- lea 96($bp,%r11,8),$bp # pointer within 1st cache line
- movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
- movq 8(%rax,%r10,8),%xmm5 # cache line contains element
- movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
- movq 24(%rax,%r10,8),%xmm7
-
- movq `0*$STRIDE/4-96`($bp),%xmm0
- movq `1*$STRIDE/4-96`($bp),%xmm1
- pand %xmm4,%xmm0
- movq `2*$STRIDE/4-96`($bp),%xmm2
- pand %xmm5,%xmm1
- movq `3*$STRIDE/4-96`($bp),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
+ movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
+ movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
+ lea 24-112(%rsp,$num,8),%r10# place the mask after tp[num+3] (+ICache optimization)
+ and \$-16,%r10
+
+ pshufd \$0,%xmm5,%xmm5 # broadcast index
+ movdqa %xmm1,%xmm4
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..31 to index and save result to stack
+#
+$code.=<<___;
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0 # compare to 1,0
+ .byte 0x67
+ movdqa %xmm4,%xmm3
+___
+for($k=0;$k<$STRIDE/16-4;$k+=4) {
+$code.=<<___;
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1 # compare to 3,2
+ movdqa %xmm0,`16*($k+0)+112`(%r10)
+ movdqa %xmm4,%xmm0
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2 # compare to 5,4
+ movdqa %xmm1,`16*($k+1)+112`(%r10)
+ movdqa %xmm4,%xmm1
+
+ paddd %xmm3,%xmm0
+ pcmpeqd %xmm5,%xmm3 # compare to 7,6
+ movdqa %xmm2,`16*($k+2)+112`(%r10)
+ movdqa %xmm4,%xmm2
+
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0
+ movdqa %xmm3,`16*($k+3)+112`(%r10)
+ movdqa %xmm4,%xmm3
+___
+}
+$code.=<<___; # last iteration can be optimized
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1
+ movdqa %xmm0,`16*($k+0)+112`(%r10)
+
+ paddd %xmm2,%xmm3
+ .byte 0x67
+ pcmpeqd %xmm5,%xmm2
+ movdqa %xmm1,`16*($k+1)+112`(%r10)
+
+ pcmpeqd %xmm5,%xmm3
+ movdqa %xmm2,`16*($k+2)+112`(%r10)
+ pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
+
+ pand `16*($k+1)-128`($bp),%xmm1
+ pand `16*($k+2)-128`($bp),%xmm2
+ movdqa %xmm3,`16*($k+3)+112`(%r10)
+ pand `16*($k+3)-128`($bp),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+for($k=0;$k<$STRIDE/16-4;$k+=4) {
+$code.=<<___;
+ movdqa `16*($k+0)-128`($bp),%xmm4
+ movdqa `16*($k+1)-128`($bp),%xmm5
+ movdqa `16*($k+2)-128`($bp),%xmm2
+ pand `16*($k+0)+112`(%r10),%xmm4
+ movdqa `16*($k+3)-128`($bp),%xmm3
+ pand `16*($k+1)+112`(%r10),%xmm5
+ por %xmm4,%xmm0
+ pand `16*($k+2)+112`(%r10),%xmm2
+ por %xmm5,%xmm1
+ pand `16*($k+3)+112`(%r10),%xmm3
por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+}
+$code.=<<___;
+ por %xmm1,%xmm0
+ pshufd \$0x4e,%xmm0,%xmm1
+ por %xmm1,%xmm0
lea $STRIDE($bp),$bp
- por %xmm3,%xmm0
-
movq %xmm0,$m0 # m0=bp[0]
mov ($n0),$n0 # pull n0[0] value
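
Note: this hunk is the substantive change in x86_64-mont5.pl. The deleted
.Lmagic_masks logic fetched bp[i] by loading one cache line selected by the
secret window index — an access pattern observable through cache timing.
The replacement pcmpeqd loop materializes a 256-byte equality mask on the
stack by comparing positions 0..31 against the index, then the loads below
touch the entire table and AND/OR-fold it, so the memory trace no longer
depends on the index. A scalar C model of such a constant-time gather
(illustrative only; the real code keeps the comparison branch-free in
SIMD):

#include <stdint.h>

static uint64_t gather_ct(const uint64_t table[32], uint32_t idx)
{
    uint64_t r = 0;
    for (uint32_t k = 0; k < 32; k++) {
        /* mask = (k == idx) ? ~0 : 0 -- pcmpeqd's job in the diff */
        uint64_t mask = (uint64_t)0 - (uint64_t)(k == idx);
        r |= table[k] & mask;            /* every entry is read */
    }
    return r;
}
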
@@ -128,29 +181,14 @@ $code.=<<___;
xor $i,$i # i=0
xor $j,$j # j=0
- movq `0*$STRIDE/4-96`($bp),%xmm0
- movq `1*$STRIDE/4-96`($bp),%xmm1
- pand %xmm4,%xmm0
- movq `2*$STRIDE/4-96`($bp),%xmm2
- pand %xmm5,%xmm1
-
mov $n0,$m1
mulq $m0 # ap[0]*bp[0]
mov %rax,$lo0
mov ($np),%rax
- movq `3*$STRIDE/4-96`($bp),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
-
imulq $lo0,$m1 # "tp[0]"*n0
mov %rdx,$hi0
- por %xmm2,%xmm0
- lea $STRIDE($bp),$bp
- por %xmm3,%xmm0
-
mulq $m1 # np[0]*m1
add %rax,$lo0 # discarded
mov 8($ap),%rax
@@ -183,8 +221,6 @@ $code.=<<___;
cmp $num,$j
jne .L1st
- movq %xmm0,$m0 # bp[1]
-
add %rax,$hi1
mov ($ap),%rax # ap[0]
adc \$0,%rdx
@@ -204,33 +240,46 @@ $code.=<<___;
jmp .Louter
.align 16
.Louter:
+ lea 24+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
+ and \$-16,%rdx
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+___
+for($k=0;$k<$STRIDE/16;$k+=4) {
+$code.=<<___;
+ movdqa `16*($k+0)-128`($bp),%xmm0
+ movdqa `16*($k+1)-128`($bp),%xmm1
+ movdqa `16*($k+2)-128`($bp),%xmm2
+ movdqa `16*($k+3)-128`($bp),%xmm3
+ pand `16*($k+0)-128`(%rdx),%xmm0
+ pand `16*($k+1)-128`(%rdx),%xmm1
+ por %xmm0,%xmm4
+ pand `16*($k+2)-128`(%rdx),%xmm2
+ por %xmm1,%xmm5
+ pand `16*($k+3)-128`(%rdx),%xmm3
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+___
+}
+$code.=<<___;
+ por %xmm5,%xmm4
+ pshufd \$0x4e,%xmm4,%xmm0
+ por %xmm4,%xmm0
+ lea $STRIDE($bp),$bp
+ movq %xmm0,$m0 # m0=bp[i]
+
xor $j,$j # j=0
mov $n0,$m1
mov (%rsp),$lo0
- movq `0*$STRIDE/4-96`($bp),%xmm0
- movq `1*$STRIDE/4-96`($bp),%xmm1
- pand %xmm4,%xmm0
- movq `2*$STRIDE/4-96`($bp),%xmm2
- pand %xmm5,%xmm1
-
mulq $m0 # ap[0]*bp[i]
add %rax,$lo0 # ap[0]*bp[i]+tp[0]
mov ($np),%rax
adc \$0,%rdx
- movq `3*$STRIDE/4-96`($bp),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
-
imulq $lo0,$m1 # tp[0]*n0
mov %rdx,$hi0
- por %xmm2,%xmm0
- lea $STRIDE($bp),$bp
- por %xmm3,%xmm0
-
mulq $m1 # np[0]*m1
add %rax,$lo0 # discarded
mov 8($ap),%rax
@@ -266,8 +315,6 @@ $code.=<<___;
cmp $num,$j
jne .Linner
- movq %xmm0,$m0 # bp[i+1]
-
add %rax,$hi1
mov ($ap),%rax # ap[0]
adc \$0,%rdx
@@ -321,13 +368,7 @@ $code.=<<___;
mov 8(%rsp,$num,8),%rsi # restore %rsp
mov \$1,%rax
-___
-$code.=<<___ if ($win64);
- movaps (%rsi),%xmm6
- movaps 0x10(%rsi),%xmm7
- lea 0x28(%rsi),%rsi
-___
-$code.=<<___;
+
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
@@ -348,91 +389,130 @@ $code.=<<___;
bn_mul4x_mont_gather5:
.Lmul4x_enter:
mov ${num}d,${num}d
- mov `($win64?56:8)`(%rsp),%r10d # load 7th argument
+ movd `($win64?56:8)`(%rsp),%xmm5 # load 7th argument
+ lea .Linc(%rip),%r10
push %rbx
push %rbp
push %r12
push %r13
push %r14
push %r15
-___
-$code.=<<___ if ($win64);
- lea -0x28(%rsp),%rsp
- movaps %xmm6,(%rsp)
- movaps %xmm7,0x10(%rsp)
+
.Lmul4x_alloca:
-___
-$code.=<<___;
mov %rsp,%rax
lea 4($num),%r11
neg %r11
- lea (%rsp,%r11,8),%rsp # tp=alloca(8*(num+4))
+ lea -256(%rsp,%r11,8),%rsp # tp=alloca(8*(num+4)+256)
and \$-1024,%rsp # minimize TLB usage
mov %rax,8(%rsp,$num,8) # tp[num+1]=%rsp
.Lmul4x_body:
mov $rp,16(%rsp,$num,8) # tp[num+2]=$rp
- mov %rdx,%r12 # reassign $bp
+ lea 128(%rdx),%r12 # reassign $bp (+size optimization)
___
$bp="%r12";
$STRIDE=2**5*8; # 5 is "window size"
$N=$STRIDE/4; # should match cache line size
$code.=<<___;
- mov %r10,%r11
- shr \$`log($N/8)/log(2)`,%r10
- and \$`$N/8-1`,%r11
- not %r10
- lea .Lmagic_masks(%rip),%rax
- and \$`2**5/($N/8)-1`,%r10 # 5 is "window size"
- lea 96($bp,%r11,8),$bp # pointer within 1st cache line
- movq 0(%rax,%r10,8),%xmm4 # set of masks denoting which
- movq 8(%rax,%r10,8),%xmm5 # cache line contains element
- movq 16(%rax,%r10,8),%xmm6 # denoted by 7th argument
- movq 24(%rax,%r10,8),%xmm7
-
- movq `0*$STRIDE/4-96`($bp),%xmm0
- movq `1*$STRIDE/4-96`($bp),%xmm1
- pand %xmm4,%xmm0
- movq `2*$STRIDE/4-96`($bp),%xmm2
- pand %xmm5,%xmm1
- movq `3*$STRIDE/4-96`($bp),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
+ movdqa 0(%r10),%xmm0 # 00000001000000010000000000000000
+ movdqa 16(%r10),%xmm1 # 00000002000000020000000200000002
+ lea 32-112(%rsp,$num,8),%r10 # place the mask after tp[num+4] (+ICache optimization)
+
+ pshufd \$0,%xmm5,%xmm5 # broadcast index
+ movdqa %xmm1,%xmm4
+ .byte 0x67,0x67
+ movdqa %xmm1,%xmm2
+___
+########################################################################
+# calculate mask by comparing 0..31 to index and save result to stack
+#
+$code.=<<___;
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0 # compare to 1,0
+ .byte 0x67
+ movdqa %xmm4,%xmm3
+___
+for($k=0;$k<$STRIDE/16-4;$k+=4) {
+$code.=<<___;
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1 # compare to 3,2
+ movdqa %xmm0,`16*($k+0)+112`(%r10)
+ movdqa %xmm4,%xmm0
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2 # compare to 5,4
+ movdqa %xmm1,`16*($k+1)+112`(%r10)
+ movdqa %xmm4,%xmm1
+
+ paddd %xmm3,%xmm0
+ pcmpeqd %xmm5,%xmm3 # compare to 7,6
+ movdqa %xmm2,`16*($k+2)+112`(%r10)
+ movdqa %xmm4,%xmm2
+
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0
+ movdqa %xmm3,`16*($k+3)+112`(%r10)
+ movdqa %xmm4,%xmm3
+___
+}
+$code.=<<___; # last iteration can be optimized
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1
+ movdqa %xmm0,`16*($k+0)+112`(%r10)
+
+ paddd %xmm2,%xmm3
+ .byte 0x67
+ pcmpeqd %xmm5,%xmm2
+ movdqa %xmm1,`16*($k+1)+112`(%r10)
+
+ pcmpeqd %xmm5,%xmm3
+ movdqa %xmm2,`16*($k+2)+112`(%r10)
+ pand `16*($k+0)-128`($bp),%xmm0 # while it's still in register
+
+ pand `16*($k+1)-128`($bp),%xmm1
+ pand `16*($k+2)-128`($bp),%xmm2
+ movdqa %xmm3,`16*($k+3)+112`(%r10)
+ pand `16*($k+3)-128`($bp),%xmm3
+ por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+for($k=0;$k<$STRIDE/16-4;$k+=4) {
+$code.=<<___;
+ movdqa `16*($k+0)-128`($bp),%xmm4
+ movdqa `16*($k+1)-128`($bp),%xmm5
+ movdqa `16*($k+2)-128`($bp),%xmm2
+ pand `16*($k+0)+112`(%r10),%xmm4
+ movdqa `16*($k+3)-128`($bp),%xmm3
+ pand `16*($k+1)+112`(%r10),%xmm5
+ por %xmm4,%xmm0
+ pand `16*($k+2)+112`(%r10),%xmm2
+ por %xmm5,%xmm1
+ pand `16*($k+3)+112`(%r10),%xmm3
por %xmm2,%xmm0
+ por %xmm3,%xmm1
+___
+}
+$code.=<<___;
+ por %xmm1,%xmm0
+ pshufd \$0x4e,%xmm0,%xmm1
+ por %xmm1,%xmm0
lea $STRIDE($bp),$bp
- por %xmm3,%xmm0
-
movq %xmm0,$m0 # m0=bp[0]
+
mov ($n0),$n0 # pull n0[0] value
mov ($ap),%rax
xor $i,$i # i=0
xor $j,$j # j=0
- movq `0*$STRIDE/4-96`($bp),%xmm0
- movq `1*$STRIDE/4-96`($bp),%xmm1
- pand %xmm4,%xmm0
- movq `2*$STRIDE/4-96`($bp),%xmm2
- pand %xmm5,%xmm1
-
mov $n0,$m1
mulq $m0 # ap[0]*bp[0]
mov %rax,$A[0]
mov ($np),%rax
- movq `3*$STRIDE/4-96`($bp),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
-
imulq $A[0],$m1 # "tp[0]"*n0
mov %rdx,$A[1]
- por %xmm2,%xmm0
- lea $STRIDE($bp),$bp
- por %xmm3,%xmm0
-
mulq $m1 # np[0]*m1
add %rax,$A[0] # discarded
mov 8($ap),%rax
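
The paddd/pcmpeqd chain added above builds that mask once per call: the window index is broadcast with pshufd, compared in pairs against the running counters seeded from .Linc, and each all-ones/all-zeroes compare result is stored into the 256-byte area placed after tp[num+4] (the last four results are additionally consumed in-register, "while it's still in register", for the first gather). A C sketch of what ends up on the stack, with an illustrative name:

    #include <stdint.h>

    /* Sketch of the mask the pcmpeqd chain leaves on the stack:
     * entry i is all-ones exactly when i equals the window index,
     * computed without any secret-dependent branch or address. */
    static void build_mask(uint64_t mask[32], unsigned idx)
    {
        for (unsigned i = 0; i < 32; i++)
            mask[i] = 0 - (uint64_t)(i == idx);
    }
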
@@ -550,8 +630,6 @@ $code.=<<___;
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
- movq %xmm0,$m0 # bp[1]
-
xor $N[1],$N[1]
add $A[0],$N[0]
adc \$0,$N[1]
@@ -561,12 +639,34 @@ $code.=<<___;
lea 1($i),$i # i++
.align 4
.Louter4x:
+ lea 32+128(%rsp,$num,8),%rdx # where 256-byte mask is (+size optimization)
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+___
+for($k=0;$k<$STRIDE/16;$k+=4) {
+$code.=<<___;
+ movdqa `16*($k+0)-128`($bp),%xmm0
+ movdqa `16*($k+1)-128`($bp),%xmm1
+ movdqa `16*($k+2)-128`($bp),%xmm2
+ movdqa `16*($k+3)-128`($bp),%xmm3
+ pand `16*($k+0)-128`(%rdx),%xmm0
+ pand `16*($k+1)-128`(%rdx),%xmm1
+ por %xmm0,%xmm4
+ pand `16*($k+2)-128`(%rdx),%xmm2
+ por %xmm1,%xmm5
+ pand `16*($k+3)-128`(%rdx),%xmm3
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+___
+}
+$code.=<<___;
+ por %xmm5,%xmm4
+ pshufd \$0x4e,%xmm4,%xmm0
+ por %xmm4,%xmm0
+ lea $STRIDE($bp),$bp
+ movq %xmm0,$m0 # m0=bp[i]
+
xor $j,$j # j=0
- movq `0*$STRIDE/4-96`($bp),%xmm0
- movq `1*$STRIDE/4-96`($bp),%xmm1
- pand %xmm4,%xmm0
- movq `2*$STRIDE/4-96`($bp),%xmm2
- pand %xmm5,%xmm1
mov (%rsp),$A[0]
mov $n0,$m1
@@ -575,18 +675,9 @@ $code.=<<___;
mov ($np),%rax
adc \$0,%rdx
- movq `3*$STRIDE/4-96`($bp),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
-
imulq $A[0],$m1 # tp[0]*n0
mov %rdx,$A[1]
- por %xmm2,%xmm0
- lea $STRIDE($bp),$bp
- por %xmm3,%xmm0
-
mulq $m1 # np[0]*m1
add %rax,$A[0] # "$N[0]", discarded
mov 8($ap),%rax
@@ -718,7 +809,6 @@ $code.=<<___;
mov $N[0],-24(%rsp,$j,8) # tp[j-1]
mov %rdx,$N[0]
- movq %xmm0,$m0 # bp[i+1]
mov $N[1],-16(%rsp,$j,8) # tp[j-1]
xor $N[1],$N[1]
@@ -809,13 +899,7 @@ ___
$code.=<<___;
mov 8(%rsp,$num,8),%rsi # restore %rsp
mov \$1,%rax
-___
-$code.=<<___ if ($win64);
- movaps (%rsi),%xmm6
- movaps 0x10(%rsi),%xmm7
- lea 0x28(%rsi),%rsi
-___
-$code.=<<___;
+
mov (%rsi),%r15
mov 8(%rsi),%r14
mov 16(%rsi),%r13
@@ -830,8 +914,8 @@ ___
}}}
{
-my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9") : # Win64 order
- ("%rdi","%rsi","%rdx","%rcx"); # Unix order
+my ($inp,$num,$tbl,$idx)=$win64?("%rcx","%rdx","%r8", "%r9d") : # Win64 order
+ ("%rdi","%rsi","%rdx","%ecx"); # Unix order
my $out=$inp;
my $STRIDE=2**5*8;
my $N=$STRIDE/4;
@@ -859,53 +943,89 @@ bn_scatter5:
.type bn_gather5,\@abi-omnipotent
.align 16
bn_gather5:
-___
-$code.=<<___ if ($win64);
-.LSEH_begin_bn_gather5:
+.LSEH_begin_bn_gather5: # Win64 thing, but harmless in other cases
# I can't trust assembler to use specific encoding:-(
- .byte 0x48,0x83,0xec,0x28 #sub \$0x28,%rsp
- .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp)
- .byte 0x0f,0x29,0x7c,0x24,0x10 #movdqa %xmm7,0x10(%rsp)
+ .byte 0x4c,0x8d,0x14,0x24 # lea (%rsp),%r10
+ .byte 0x48,0x81,0xec,0x08,0x01,0x00,0x00 # sub $0x108,%rsp
+ lea .Linc(%rip),%rax
+ and \$-16,%rsp # shouldn't be formally required
+
+ movd $idx,%xmm5
+ movdqa 0(%rax),%xmm0 # 00000001000000010000000000000000
+ movdqa 16(%rax),%xmm1 # 00000002000000020000000200000002
+ lea 128($tbl),%r11 # size optimization
+ lea 128(%rsp),%rax # size optimization
+
+ pshufd \$0,%xmm5,%xmm5 # broadcast $idx
+ movdqa %xmm1,%xmm4
+ movdqa %xmm1,%xmm2
___
+########################################################################
+# calculate mask by comparing 0..31 to $idx and save result to stack
+#
+for($i=0;$i<$STRIDE/16;$i+=4) {
+$code.=<<___;
+ paddd %xmm0,%xmm1
+ pcmpeqd %xmm5,%xmm0 # compare to 1,0
+___
+$code.=<<___ if ($i);
+ movdqa %xmm3,`16*($i-1)-128`(%rax)
+___
+$code.=<<___;
+ movdqa %xmm4,%xmm3
+
+ paddd %xmm1,%xmm2
+ pcmpeqd %xmm5,%xmm1 # compare to 3,2
+ movdqa %xmm0,`16*($i+0)-128`(%rax)
+ movdqa %xmm4,%xmm0
+
+ paddd %xmm2,%xmm3
+ pcmpeqd %xmm5,%xmm2 # compare to 5,4
+ movdqa %xmm1,`16*($i+1)-128`(%rax)
+ movdqa %xmm4,%xmm1
+
+ paddd %xmm3,%xmm0
+ pcmpeqd %xmm5,%xmm3 # compare to 7,6
+ movdqa %xmm2,`16*($i+2)-128`(%rax)
+ movdqa %xmm4,%xmm2
+___
+}
$code.=<<___;
- mov $idx,%r11
- shr \$`log($N/8)/log(2)`,$idx
- and \$`$N/8-1`,%r11
- not $idx
- lea .Lmagic_masks(%rip),%rax
- and \$`2**5/($N/8)-1`,$idx # 5 is "window size"
- lea 96($tbl,%r11,8),$tbl # pointer within 1st cache line
- movq 0(%rax,$idx,8),%xmm4 # set of masks denoting which
- movq 8(%rax,$idx,8),%xmm5 # cache line contains element
- movq 16(%rax,$idx,8),%xmm6 # denoted by 7th argument
- movq 24(%rax,$idx,8),%xmm7
+ movdqa %xmm3,`16*($i-1)-128`(%rax)
jmp .Lgather
-.align 16
-.Lgather:
- movq `0*$STRIDE/4-96`($tbl),%xmm0
- movq `1*$STRIDE/4-96`($tbl),%xmm1
- pand %xmm4,%xmm0
- movq `2*$STRIDE/4-96`($tbl),%xmm2
- pand %xmm5,%xmm1
- movq `3*$STRIDE/4-96`($tbl),%xmm3
- pand %xmm6,%xmm2
- por %xmm1,%xmm0
- pand %xmm7,%xmm3
- por %xmm2,%xmm0
- lea $STRIDE($tbl),$tbl
- por %xmm3,%xmm0
+.align 32
+.Lgather:
+ pxor %xmm4,%xmm4
+ pxor %xmm5,%xmm5
+___
+for($i=0;$i<$STRIDE/16;$i+=4) {
+$code.=<<___;
+ movdqa `16*($i+0)-128`(%r11),%xmm0
+ movdqa `16*($i+1)-128`(%r11),%xmm1
+ movdqa `16*($i+2)-128`(%r11),%xmm2
+ pand `16*($i+0)-128`(%rax),%xmm0
+ movdqa `16*($i+3)-128`(%r11),%xmm3
+ pand `16*($i+1)-128`(%rax),%xmm1
+ por %xmm0,%xmm4
+ pand `16*($i+2)-128`(%rax),%xmm2
+ por %xmm1,%xmm5
+ pand `16*($i+3)-128`(%rax),%xmm3
+ por %xmm2,%xmm4
+ por %xmm3,%xmm5
+___
+}
+$code.=<<___;
+ por %xmm5,%xmm4
+ lea $STRIDE(%r11),%r11
+ pshufd \$0x4e,%xmm4,%xmm0
+ por %xmm4,%xmm0
movq %xmm0,($out) # m0=bp[0]
lea 8($out),$out
sub \$1,$num
jnz .Lgather
-___
-$code.=<<___ if ($win64);
- movaps (%rsp),%xmm6
- movaps 0x10(%rsp),%xmm7
- lea 0x28(%rsp),%rsp
-___
-$code.=<<___;
+
+ lea (%r10),%rsp
ret
.LSEH_end_bn_gather5:
.size bn_gather5,.-bn_gather5
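
The rewritten bn_gather5 follows the same pattern as the inlined gathers: build the 32-entry mask on a 0x108-byte stack frame, then for each of the num output words scan all 32 interleaved table slots. A hedged C model of the loop (the 32-qword slot stride is an assumption matching bn_scatter5's store pattern):

    #include <stddef.h>
    #include <stdint.h>

    /* Model of the rewritten bn_gather5: word j of power p is assumed
     * to live at table[j*32 + p]; every slot is read, so the access
     * pattern does not depend on the secret power index. */
    static void gather5_model(uint64_t *out, size_t num,
                              const uint64_t *table, unsigned power)
    {
        for (size_t j = 0; j < num; j++) {
            uint64_t acc = 0;
            for (unsigned i = 0; i < 32; i++) {
                uint64_t mask = 0 - (uint64_t)(i == power);
                acc |= table[j * 32 + i] & mask;
            }
            out[j] = acc;
        }
    }
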
@@ -913,9 +1033,9 @@ ___
}
$code.=<<___;
.align 64
-.Lmagic_masks:
- .long 0,0, 0,0, 0,0, -1,-1
- .long 0,0, 0,0, 0,0, 0,0
+.Linc:
+ .long 0,0, 1,1
+ .long 2,2, 2,2
.asciz "Montgomery Multiplication with scatter/gather for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___
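
The new .Linc data replaces the .Lmagic_masks table: its first 16 bytes seed the two comparison lanes with the dword pairs {0,0} and {1,1} (indices 0 and 1), and its second 16 bytes add 2 to every lane per step, so sixteen paddd steps walk the lane pair through {1,0}, {3,2}, ..., {31,30}. A small C sketch of that counting pattern (illustrative only):

    #include <stdint.h>

    /* Lane-stepping model of .Linc: two indices are compared per step,
     * starting at {1,0} and advancing both by 2, covering 0..31. */
    static void linc_mask(uint64_t mask[32], unsigned idx)
    {
        unsigned lane0 = 0, lane1 = 1;   /* from .long 0,0, 1,1 */
        for (int step = 0; step < 16; step++) {
            mask[lane0] = 0 - (uint64_t)(lane0 == idx);
            mask[lane1] = 0 - (uint64_t)(lane1 == idx);
            lane0 += 2;                  /* from .long 2,2, 2,2 */
            lane1 += 2;
        }
    }
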
@@ -954,7 +1074,7 @@ mul_handler:
cmp %r10,%rbx # context->Rip<end of prologue label
jb .Lcommon_seh_tail
- lea `40+48`(%rax),%rax
+ lea 48(%rax),%rax
mov 4(%r11),%r10d # HandlerData[1]
lea (%rsi,%r10),%r10 # end of alloca label
@@ -971,9 +1091,7 @@ mul_handler:
mov 192($context),%r10 # pull $num
mov 8(%rax,%r10,8),%rax # pull saved stack pointer
- movaps (%rax),%xmm0
- movaps 16(%rax),%xmm1
- lea `40+48`(%rax),%rax
+ lea 48(%rax),%rax
mov -8(%rax),%rbx
mov -16(%rax),%rbp
@@ -987,8 +1105,6 @@ mul_handler:
mov %r13,224($context) # restore context->R13
mov %r14,232($context) # restore context->R14
mov %r15,240($context) # restore context->R15
- movups %xmm0,512($context) # restore context->Xmm6
- movups %xmm1,528($context) # restore context->Xmm7
.Lcommon_seh_tail:
mov 8(%rax),%rdi
@@ -1057,10 +1173,9 @@ mul_handler:
.rva .Lmul4x_alloca,.Lmul4x_body,.Lmul4x_epilogue # HandlerData[]
.align 8
.LSEH_info_bn_gather5:
- .byte 0x01,0x0d,0x05,0x00
- .byte 0x0d,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7
- .byte 0x08,0x68,0x00,0x00 #movaps (rsp),xmm6
- .byte 0x04,0x42,0x00,0x00 #sub rsp,0x28
+ .byte 0x01,0x0b,0x03,0x0a
+ .byte 0x0b,0x01,0x21,0x00 # sub rsp,0x108
+ .byte 0x04,0xa3,0x00,0x00 # lea r10,(rsp), set_frame r10
.align 8
___
}