Diffstat (limited to 'deps/openssl/asm_obsolete/x64-macosx-gas/ec/ecp_nistz256-x86_64.s')
-rw-r--r--  deps/openssl/asm_obsolete/x64-macosx-gas/ec/ecp_nistz256-x86_64.s  119
1 file changed, 60 insertions(+), 59 deletions(-)
diff --git a/deps/openssl/asm_obsolete/x64-macosx-gas/ec/ecp_nistz256-x86_64.s b/deps/openssl/asm_obsolete/x64-macosx-gas/ec/ecp_nistz256-x86_64.s
index 30456b900f..62e8d00ccd 100644
--- a/deps/openssl/asm_obsolete/x64-macosx-gas/ec/ecp_nistz256-x86_64.s
+++ b/deps/openssl/asm_obsolete/x64-macosx-gas/ec/ecp_nistz256-x86_64.s
@@ -27,6 +27,7 @@ _ecp_nistz256_mul_by_2:
pushq %r13
movq 0(%rsi),%r8
+ xorq %r13,%r13
movq 8(%rsi),%r9
addq %r8,%r8
movq 16(%rsi),%r10
@@ -37,7 +38,7 @@ _ecp_nistz256_mul_by_2:
adcq %r10,%r10
adcq %r11,%r11
movq %r9,%rdx
- sbbq %r13,%r13
+ adcq $0,%r13
subq 0(%rsi),%r8
movq %r10,%rcx
@@ -45,14 +46,14 @@ _ecp_nistz256_mul_by_2:
sbbq 16(%rsi),%r10
movq %r11,%r12
sbbq 24(%rsi),%r11
- testq %r13,%r13
+ sbbq $0,%r13
- cmovzq %rax,%r8
- cmovzq %rdx,%r9
+ cmovcq %rax,%r8
+ cmovcq %rdx,%r9
movq %r8,0(%rdi)
- cmovzq %rcx,%r10
+ cmovcq %rcx,%r10
movq %r9,8(%rdi)
- cmovzq %r12,%r11
+ cmovcq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
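
The hunks above fix a carry-propagation bug in _ecp_nistz256_mul_by_2: the old sequence folded the carry out of the doubling into an all-zeros/all-ones mask (sbbq %r13,%r13) and selected with testq/cmovzq, so the conditional subtraction of the modulus happened only when the sum overflowed 2^256; a doubled value with p <= 2a < 2^256 was left unreduced. The patched sequence keeps the carry as a fifth limb (adcq $0,%r13), lets the trial subtraction of p borrow through it (sbbq $0,%r13), and selects with cmovcq, so the result is reduced exactly when 2a >= p. Below is a minimal C sketch of the corrected pattern; the function name, the generic modulus argument, and the use of unsigned __int128 are illustrative assumptions rather than OpenSSL code, and unlike the cmovcq chain a C ternary is not guaranteed constant time.

#include <stdint.h>

/* Sketch of the patched reduction: compute 2*a mod p, keeping the carry
 * out of the doubling as a fifth limb and letting the borrow of the
 * trial subtraction run through it, as adcq $0 / sbbq $0 / cmovcq do
 * in the assembly above. Illustrative only, not OpenSSL's API. */
static void mod_mul_by_2(uint64_t r[4], const uint64_t a[4],
                         const uint64_t p[4])
{
    uint64_t t[4], u[4], carry = 0, borrow = 0;

    for (int i = 0; i < 4; i++) {          /* t = 2*a, carry = bit 256 */
        unsigned __int128 s = ((unsigned __int128)a[i] << 1) + carry;
        t[i] = (uint64_t)s;
        carry = (uint64_t)(s >> 64);
    }
    for (int i = 0; i < 4; i++) {          /* u = t - p, borrow chain */
        unsigned __int128 d = (unsigned __int128)t[i] - p[i] - borrow;
        u[i] = (uint64_t)d;
        borrow = (uint64_t)(d >> 64) & 1;
    }
    /* sbbq $0,%r13: the final borrow is taken out of the carry limb, so
     * the carry flag ends up set only when the full 5-limb value 2*a was
     * below p; cmovcq then keeps the unreduced t in exactly that case. */
    int below_p = carry < borrow;
    for (int i = 0; i < 4; i++)
        r[i] = below_p ? t[i] : u[i];      /* asm: cmovcq, constant time */
}

The same adcq $0 / sbbq $0 / cmovcq pattern is applied by the later hunks to _ecp_nistz256_mul_by_3, _ecp_nistz256_add, __ecp_nistz256_add_toq, __ecp_nistz256_mul_by_2q, and the doubling steps inside the point-add paths.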
@@ -149,12 +150,12 @@ _ecp_nistz256_mul_by_3:
sbbq $0,%r10
movq %r11,%r12
sbbq L$poly+24(%rip),%r11
- testq %r13,%r13
+ sbbq $0,%r13
- cmovzq %rax,%r8
- cmovzq %rdx,%r9
- cmovzq %rcx,%r10
- cmovzq %r12,%r11
+ cmovcq %rax,%r8
+ cmovcq %rdx,%r9
+ cmovcq %rcx,%r10
+ cmovcq %r12,%r11
xorq %r13,%r13
addq 0(%rsi),%r8
@@ -171,14 +172,14 @@ _ecp_nistz256_mul_by_3:
sbbq $0,%r10
movq %r11,%r12
sbbq L$poly+24(%rip),%r11
- testq %r13,%r13
+ sbbq $0,%r13
- cmovzq %rax,%r8
- cmovzq %rdx,%r9
+ cmovcq %rax,%r8
+ cmovcq %rdx,%r9
movq %r8,0(%rdi)
- cmovzq %rcx,%r10
+ cmovcq %rcx,%r10
movq %r9,8(%rdi)
- cmovzq %r12,%r11
+ cmovcq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
@@ -217,14 +218,14 @@ _ecp_nistz256_add:
sbbq 16(%rsi),%r10
movq %r11,%r12
sbbq 24(%rsi),%r11
- testq %r13,%r13
+ sbbq $0,%r13
- cmovzq %rax,%r8
- cmovzq %rdx,%r9
+ cmovcq %rax,%r8
+ cmovcq %rdx,%r9
movq %r8,0(%rdi)
- cmovzq %rcx,%r10
+ cmovcq %rcx,%r10
movq %r9,8(%rdi)
- cmovzq %r12,%r11
+ cmovcq %r12,%r11
movq %r10,16(%rdi)
movq %r11,24(%rdi)
@@ -993,13 +994,14 @@ _ecp_nistz256_avx2_select_w7:
.p2align 5
__ecp_nistz256_add_toq:
+ xorq %r11,%r11
addq 0(%rbx),%r12
adcq 8(%rbx),%r13
movq %r12,%rax
adcq 16(%rbx),%r8
adcq 24(%rbx),%r9
movq %r13,%rbp
- sbbq %r11,%r11
+ adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
@@ -1007,14 +1009,14 @@ __ecp_nistz256_add_toq:
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
- testq %r11,%r11
+ sbbq $0,%r11
- cmovzq %rax,%r12
- cmovzq %rbp,%r13
+ cmovcq %rax,%r12
+ cmovcq %rbp,%r13
movq %r12,0(%rdi)
- cmovzq %rcx,%r8
+ cmovcq %rcx,%r8
movq %r13,8(%rdi)
- cmovzq %r10,%r9
+ cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
@@ -1082,13 +1084,14 @@ __ecp_nistz256_subq:
.p2align 5
__ecp_nistz256_mul_by_2q:
+ xorq %r11,%r11
addq %r12,%r12
adcq %r13,%r13
movq %r12,%rax
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
- sbbq %r11,%r11
+ adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
@@ -1096,14 +1099,14 @@ __ecp_nistz256_mul_by_2q:
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
- testq %r11,%r11
+ sbbq $0,%r11
- cmovzq %rax,%r12
- cmovzq %rbp,%r13
+ cmovcq %rax,%r12
+ cmovcq %rbp,%r13
movq %r12,0(%rdi)
- cmovzq %rcx,%r8
+ cmovcq %rcx,%r8
movq %r13,8(%rdi)
- cmovzq %r10,%r9
+ cmovcq %r10,%r9
movq %r8,16(%rdi)
movq %r9,24(%rdi)
@@ -1333,16 +1336,14 @@ _ecp_nistz256_point_add:
movq %rdx,%rsi
movdqa %xmm0,384(%rsp)
movdqa %xmm1,384+16(%rsp)
- por %xmm0,%xmm1
movdqa %xmm2,416(%rsp)
movdqa %xmm3,416+16(%rsp)
- por %xmm2,%xmm3
movdqa %xmm4,448(%rsp)
movdqa %xmm5,448+16(%rsp)
- por %xmm1,%xmm3
+ por %xmm4,%xmm5
movdqu 0(%rsi),%xmm0
- pshufd $0xb1,%xmm3,%xmm5
+ pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rsi),%xmm1
movdqu 32(%rsi),%xmm2
por %xmm3,%xmm5
@@ -1354,14 +1355,14 @@ _ecp_nistz256_point_add:
movdqa %xmm0,480(%rsp)
pshufd $0x1e,%xmm5,%xmm4
movdqa %xmm1,480+16(%rsp)
- por %xmm0,%xmm1
-.byte 102,72,15,110,199
+ movdqu 64(%rsi),%xmm0
+ movdqu 80(%rsi),%xmm1
movdqa %xmm2,512(%rsp)
movdqa %xmm3,512+16(%rsp)
- por %xmm2,%xmm3
por %xmm4,%xmm5
pxor %xmm4,%xmm4
- por %xmm1,%xmm3
+ por %xmm0,%xmm1
+.byte 102,72,15,110,199
leaq 64-0(%rsi),%rsi
movq %rax,544+0(%rsp)
@@ -1372,8 +1373,8 @@ _ecp_nistz256_point_add:
call __ecp_nistz256_sqr_montq
pcmpeqd %xmm4,%xmm5
- pshufd $0xb1,%xmm3,%xmm4
- por %xmm3,%xmm4
+ pshufd $0xb1,%xmm1,%xmm4
+ por %xmm1,%xmm4
pshufd $0,%xmm5,%xmm5
pshufd $0x1e,%xmm4,%xmm3
por %xmm3,%xmm4
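
These _ecp_nistz256_point_add hunks also change how the point at infinity is detected: the removed por instructions accumulated the X and Y coordinates, while the added ones accumulate the Z coordinate (stored at 448(%rsp) for the first point, loaded from 64(%rsi) and 80(%rsi) for the second), so a point is now treated as infinity iff Z == 0. That matters because a valid projective point can have all-zero X or Y limbs while Z is nonzero. The pshufd/por/pcmpeqd sequence folds the Z words together and compares against zero to produce an all-ones/all-zeros mask; here is a scalar sketch of the same predicate, with an illustrative struct layout and function name that are not OpenSSL's:

#include <stdint.h>

typedef struct { uint64_t x[4], y[4], z[4]; } pt256;  /* illustrative */

/* All-ones mask iff the point is at infinity (Z == 0), mirroring the
 * por/pshufd/pcmpeqd fold the patched code performs on the Z words. */
static uint64_t is_infinity(const pt256 *p)
{
    uint64_t acc = p->z[0] | p->z[1] | p->z[2] | p->z[3];
    return acc == 0 ? ~(uint64_t)0 : 0;
}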
@@ -1556,6 +1557,7 @@ L$add_proceedq:
+ xorq %r11,%r11
addq %r12,%r12
leaq 96(%rsp),%rsi
adcq %r13,%r13
@@ -1563,7 +1565,7 @@ L$add_proceedq:
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
- sbbq %r11,%r11
+ adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
@@ -1571,15 +1573,15 @@ L$add_proceedq:
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
- testq %r11,%r11
+ sbbq $0,%r11
- cmovzq %rax,%r12
+ cmovcq %rax,%r12
movq 0(%rsi),%rax
- cmovzq %rbp,%r13
+ cmovcq %rbp,%r13
movq 8(%rsi),%rbp
- cmovzq %rcx,%r8
+ cmovcq %rcx,%r8
movq 16(%rsi),%rcx
- cmovzq %r10,%r9
+ cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq
@@ -1733,16 +1735,14 @@ _ecp_nistz256_point_add_affine:
movq 64+24(%rsi),%r8
movdqa %xmm0,320(%rsp)
movdqa %xmm1,320+16(%rsp)
- por %xmm0,%xmm1
movdqa %xmm2,352(%rsp)
movdqa %xmm3,352+16(%rsp)
- por %xmm2,%xmm3
movdqa %xmm4,384(%rsp)
movdqa %xmm5,384+16(%rsp)
- por %xmm1,%xmm3
+ por %xmm4,%xmm5
movdqu 0(%rbx),%xmm0
- pshufd $0xb1,%xmm3,%xmm5
+ pshufd $0xb1,%xmm5,%xmm3
movdqu 16(%rbx),%xmm1
movdqu 32(%rbx),%xmm2
por %xmm3,%xmm5
@@ -1860,6 +1860,7 @@ _ecp_nistz256_point_add_affine:
+ xorq %r11,%r11
addq %r12,%r12
leaq 192(%rsp),%rsi
adcq %r13,%r13
@@ -1867,7 +1868,7 @@ _ecp_nistz256_point_add_affine:
adcq %r8,%r8
adcq %r9,%r9
movq %r13,%rbp
- sbbq %r11,%r11
+ adcq $0,%r11
subq $-1,%r12
movq %r8,%rcx
@@ -1875,15 +1876,15 @@ _ecp_nistz256_point_add_affine:
sbbq $0,%r8
movq %r9,%r10
sbbq %r15,%r9
- testq %r11,%r11
+ sbbq $0,%r11
- cmovzq %rax,%r12
+ cmovcq %rax,%r12
movq 0(%rsi),%rax
- cmovzq %rbp,%r13
+ cmovcq %rbp,%r13
movq 8(%rsi),%rbp
- cmovzq %rcx,%r8
+ cmovcq %rcx,%r8
movq 16(%rsi),%rcx
- cmovzq %r10,%r9
+ cmovcq %r10,%r9
movq 24(%rsi),%r10
call __ecp_nistz256_subq