aboutsummaryrefslogtreecommitdiffstats
path: root/runtime/arm/i64_stof.S
diff options
context:
space:
mode:
authorBernhard Schommer <bernhardschommer@gmail.com>2016-08-05 14:05:34 +0200
committerBernhard Schommer <bernhardschommer@gmail.com>2016-08-05 14:05:34 +0200
commit028aaefc44b8ed8bafd8b8896fedb53f6e68df3c (patch)
treed7f9325da52050a64e5c9ced1017a4f57c674ff3 /runtime/arm/i64_stof.S
parent4ac759d0bceef49d16197e3bb8c9767ece693c5e (diff)
downloadcompcert-kvx-028aaefc44b8ed8bafd8b8896fedb53f6e68df3c.tar.gz
compcert-kvx-028aaefc44b8ed8bafd8b8896fedb53f6e68df3c.zip
Implement support for big endian arm targets.
Adds support for the big endian arm targets by making the target endianness flag configurable, adding support for the big endian calling conventions, rewriting memory access patterns and adding big endian versions of the runtime functions. Bug 19418
Diffstat (limited to 'runtime/arm/i64_stof.S')
-rw-r--r--runtime/arm/i64_stof.S28
1 file changed, 14 insertions, 14 deletions
diff --git a/runtime/arm/i64_stof.S b/runtime/arm/i64_stof.S
index bb5e05c0..d8a250c8 100644
--- a/runtime/arm/i64_stof.S
+++ b/runtime/arm/i64_stof.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,11 +37,11 @@
#include "sysdeps.h"
@@@ Conversion from signed 64-bit integer to single float
-
+
FUNCTION(__i64_stof)
@ Check whether -2^53 <= X < 2^53
- ASR r2, r1, #21
- ASR r3, r1, #31 @ (r2,r3) = X >> 53
+ ASR r2, Reg0HI, #21
+ ASR r3, Reg0HI, #31 @ (r2,r3) = X >> 53
adds r2, r2, #1
adc r3, r3, #0 @ (r2,r3) = X >> 53 + 1
cmp r3, #2
@@ -49,29 +49,29 @@ FUNCTION(__i64_stof)
@ X is large enough that double rounding can occur.
@ Avoid it by nudging X away from the points where double rounding
@ occurs (the "round to odd" technique)
- MOV r2, #0x700
+ MOV r2, #0x700
ORR r2, r2, #0xFF @ r2 = 0x7FF
- AND r3, r0, r2 @ extract bits 0 to 11 of X
+ AND r3, Reg0LO, r2 @ extract bits 0 to 11 of X
ADD r3, r3, r2 @ r3 = (X & 0x7FF) + 0x7FF
@ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
@ bits 13-31 of r3 are 0
- ORR r0, r0, r3 @ correct bit number 12 of X
- BIC r0, r0, r2 @ set to 0 bits 0 to 11 of X
+ ORR Reg0LO, Reg0LO, r3 @ correct bit number 12 of X
+ BIC Reg0LO, Reg0LO, r2 @ set to 0 bits 0 to 11 of X
@ Convert to double
-1: vmov s0, r0
+1: vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
- vmov s2, r1
+ vmov s2, Reg0HI
vcvt.f64.s32 d1, s2 @ convert high half to double (signed)
vldr d2, .LC1 @ d2 = 2^32
vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
@ Round to single
vcvt.f32.f64 s0, d0
-#ifdef ABI_eabi
+#ifdef ABI_eabi
@ Return result in r0
- vmov r0, s0
-#endif
+ vmov r0, s0
+#endif
bx lr
ENDFUNCTION(__i64_stof)
-
+
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision