Diffstat (limited to 'deps/openssl/openssl/crypto/rc4/asm/rc4-x86_64.pl')
-rwxr-xr-x  deps/openssl/openssl/crypto/rc4/asm/rc4-x86_64.pl | 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/deps/openssl/openssl/crypto/rc4/asm/rc4-x86_64.pl b/deps/openssl/openssl/crypto/rc4/asm/rc4-x86_64.pl
index aaed2b1e61..1a9cc47d72 100755
--- a/deps/openssl/openssl/crypto/rc4/asm/rc4-x86_64.pl
+++ b/deps/openssl/openssl/crypto/rc4/asm/rc4-x86_64.pl
@@ -8,7 +8,7 @@
#
# ====================================================================
-# Written by Andy Polyakov <appro@fy.chalmers.se> for the OpenSSL
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
@@ -48,7 +48,7 @@
# April 2005
#
-# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
+# P4 EM64T core appears to be "allergic" to 64-bit inc/dec. Replacing
# those with add/sub results in 50% performance improvement of folded
# loop...
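
The hunk above appears to touch only the comment's whitespace, but the observation it carries is worth a concrete illustration: on the P4's EM64T core, 64-bit inc/dec update only part of EFLAGS (they leave CF untouched), which creates a false dependency on the previous flags-producing instruction, while add/sub rewrite the full flags register and avoid the stall. A minimal sketch of the substitution, using an illustrative register rather than one taken from this module:

	dec	%r11		# partial EFLAGS update; costly on P4 EM64T
	sub	$1,%r11		# same arithmetic effect, full EFLAGS write
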
@@ -88,7 +88,7 @@
# The only code path that was not modified is P4-specific one. Non-P4
# Intel code path optimization is heavily based on submission by Maxim
# Perminov, Maxim Locktyukhin and Jim Guilford of Intel. I've used
-# some of the ideas even in attempt to optmize the original RC4_INT
+# some of the ideas even in attempt to optimize the original RC4_INT
# code path... Current performance in cycles per processed byte (less
# is better) and improvement coefficients relative to previous
# version of this module are:
@@ -142,9 +142,13 @@ RC4: or $len,$len
jne .Lentry
ret
.Lentry:
+.cfi_startproc
push %rbx
+.cfi_push %rbx
push %r12
+.cfi_push %r12
push %r13
+.cfi_push %r13
.Lprologue:
mov $len,%r11
mov $inp,%r12
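
The .cfi_startproc directive added here is standard GAS DWARF CFI, but .cfi_push is not: it appears to be a CRYPTOGAMS perlasm shorthand, expanded by crypto/perlasm/x86_64-xlate.pl, that records in one line both the CFA growth from a push and the save slot of the pushed register. A hedged sketch of what one such annotated push is expected to become in the emitted assembly (the -16 offset is illustrative; the actual offset depends on how many registers were pushed before it):

	push	%rbx
	.cfi_adjust_cfa_offset	8	# stack grew by one 8-byte slot
	.cfi_offset	%rbx,-16	# %rbx saved at CFA-16
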
@@ -427,11 +431,16 @@ $code.=<<___;
movl $YY#d,-4($dat)
mov (%rsp),%r13
+.cfi_restore %r13
mov 8(%rsp),%r12
+.cfi_restore %r12
mov 16(%rsp),%rbx
+.cfi_restore %rbx
add \$24,%rsp
+.cfi_adjust_cfa_offset -24
.Lepilogue:
ret
+.cfi_endproc
.size RC4,.-RC4
___
}
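
In the epilogue hunk the annotations are plain GAS directives: because the routine reloads the callee-saved registers with mov rather than pop, each reload is paired with .cfi_restore to mark the register as holding its caller's value again, and the single add \$24,%rsp that releases all three save slots is matched by one .cfi_adjust_cfa_offset -24. A self-contained sketch of the same prologue/epilogue pattern (the function name demo and the single saved register are illustrative, not part of this module):

	.globl	demo
	.type	demo,@function
demo:
.cfi_startproc
	push	%rbx
.cfi_adjust_cfa_offset	8
.cfi_offset	%rbx,-16
	# ... body that clobbers %rbx ...
	mov	(%rsp),%rbx	# restore via load, not pop
.cfi_restore	%rbx
	add	$8,%rsp		# release the save slot
.cfi_adjust_cfa_offset	-8
	ret
.cfi_endproc
	.size	demo,.-demo
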