Commit fffa8f9

add generated boringssl files

spoonincode committed Sep 6, 2023
1 parent 636117f commit fffa8f9
Showing 166 changed files with 288,595 additions and 0 deletions.
606 changes: 606 additions & 0 deletions libraries/libfc/libraries/boringssl/boringssl/CMakeLists.txt

Large diffs are not rendered by default.

@@ -0,0 +1,89 @@
// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#include <openssl/asm_base.h>

#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_AARCH64) && defined(__APPLE__)
#include <openssl/arm_arch.h>

.text

// BN_ULONG bn_add_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
//                       size_t num);

.globl _bn_add_words
.private_extern _bn_add_words
.align 4
_bn_add_words:
    AARCH64_VALID_CALL_TARGET
    // Clear the carry flag.
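    // (cmn xzr, xzr adds 0 + 0, which cannot carry out, so this leaves C = 0.)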
    cmn xzr, xzr

    // aarch64 can load two registers at a time, so we do two loop iterations
    // at a time. Split x3 = 2 * x8 + x3. This allows loop operations to use
    // CBNZ without clobbering the carry flag.
    lsr x8, x3, #1
    and x3, x3, #1
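    // For example, num = 5 splits into x8 = 2 paired iterations and x3 = 1 tail word.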

    cbz x8, Ladd_tail
Ladd_loop:
    ldp x4, x5, [x1], #16
    ldp x6, x7, [x2], #16
    sub x8, x8, #1
    adcs x4, x4, x6
    adcs x5, x5, x7
    stp x4, x5, [x0], #16
    cbnz x8, Ladd_loop

Ladd_tail:
    cbz x3, Ladd_exit
    ldr x4, [x1], #8
    ldr x6, [x2], #8
    adcs x4, x4, x6
    str x4, [x0], #8

Ladd_exit:
    cset x0, cs
    ret
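
// Editor's aside, not part of the generated file: for readers who don't
// speak A64, here is a minimal C model of what bn_add_words computes -- a
// word-wise add with carry propagation, returning the carry out of the top
// word. The name bn_add_words_ref is ours, and BN_ULONG is assumed to be a
// 64-bit limb, as it is on this target.
//
//     #include <stddef.h>
//     #include <stdint.h>
//
//     typedef uint64_t BN_ULONG;
//
//     static BN_ULONG bn_add_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
//                                      const BN_ULONG *bp, size_t num) {
//       BN_ULONG carry = 0;
//       for (size_t i = 0; i < num; i++) {
//         BN_ULONG t = ap[i] + carry;
//         carry = (t < carry);   // carry out of adding the incoming carry
//         rp[i] = t + bp[i];
//         carry += (rp[i] < t);  // carry out of adding bp[i]
//       }
//       return carry;            // what "cset x0, cs" returns above
//     }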


// BN_ULONG bn_sub_words(BN_ULONG *rp, const BN_ULONG *ap, const BN_ULONG *bp,
//                       size_t num);

.globl _bn_sub_words
.private_extern _bn_sub_words
.align 4
_bn_sub_words:
    AARCH64_VALID_CALL_TARGET
    // Set the carry flag. Arm's borrow bit is flipped from the carry flag,
    // so we want C = 1 here.
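    // (cmp xzr, xzr computes 0 - 0 with no borrow, so this leaves C = 1.)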
    cmp xzr, xzr

    // aarch64 can load two registers at a time, so we do two loop iterations
    // at a time. Split x3 = 2 * x8 + x3. This allows loop operations to use
    // CBNZ without clobbering the carry flag.
    lsr x8, x3, #1
    and x3, x3, #1

    cbz x8, Lsub_tail
Lsub_loop:
    ldp x4, x5, [x1], #16
    ldp x6, x7, [x2], #16
    sub x8, x8, #1
    sbcs x4, x4, x6
    sbcs x5, x5, x7
    stp x4, x5, [x0], #16
    cbnz x8, Lsub_loop

Lsub_tail:
    cbz x3, Lsub_exit
    ldr x4, [x1], #8
    ldr x6, [x2], #8
    sbcs x4, x4, x6
    str x4, [x0], #8

Lsub_exit:
    cset x0, cc
    ret

#endif // !OPENSSL_NO_ASM && defined(OPENSSL_AARCH64) && defined(__APPLE__)
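
For readers comparing against the assembly above, a matching C model of
bn_sub_words (again an editor's sketch with an assumed name, reusing the
BN_ULONG typedef from the bn_add_words sketch): word-wise subtraction with
borrow propagation, returning 1 if the top word borrows. Note the flag
convention the comments mention: after an Arm subtract, C = 1 means "no
borrow", which is why the routine seeds C with cmp xzr, xzr and reports the
final borrow with cset x0, cc.

    static BN_ULONG bn_sub_words_ref(BN_ULONG *rp, const BN_ULONG *ap,
                                     const BN_ULONG *bp, size_t num) {
      BN_ULONG borrow = 0;
      for (size_t i = 0; i < num; i++) {
        BN_ULONG t = ap[i] - bp[i];
        BN_ULONG b = (ap[i] < bp[i]);  // borrow out of ap[i] - bp[i]
        rp[i] = t - borrow;
        borrow = b | (t < borrow);     // borrow out of subtracting the incoming borrow
      }
      return borrow;                   // what "cset x0, cc" returns above
    }

For example, with num = 1, ap[0] = 0, and bp[0] = 1, both the model and the
assembly store rp[0] = 0xFFFFFFFFFFFFFFFF and return 1.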