author    Xavier Leroy <xavierleroy@users.noreply.github.com>  2016-08-24 11:24:59 +0200
committer GitHub <noreply@github.com>  2016-08-24 11:24:59 +0200
commit    0a7288fb65ebaed329e06c1fd14aef83e8defcda (patch)
tree      c2c7ac666c62be0f97a20c74286e0457890ddd8d /runtime/arm/i64_stof.S
parent    954b01e1ac6189f4a8b5ad1b6accf6eb01261d1f (diff)
parent    e0f0f573a4a8fc1f564a31388afa9c23e48bb016 (diff)
Merge pull request #118 from AbsInt/armeb
Support for ARM Big Endian
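The change replaces the hard-coded register names r0 and r1 with the macros Reg0LO and Reg0HI: an int64 argument is passed in the register pair (r0,r1), and which of the two holds the low 32 bits depends on the target byte order. A minimal sketch of how such macros can be defined, assuming a conditional named ENDIANNESS_big (an assumption for illustration; the real definitions live in the sysdeps.h header included by this file):

/* Sketch only: assumed macro layout, not the actual sysdeps.h. */
#ifdef ENDIANNESS_big      /* assumed name for the armeb configuration */
#define Reg0HI r0          /* high 32 bits of the first int64 argument */
#define Reg0LO r1          /* low 32 bits */
#else                      /* little-endian ARM */
#define Reg0HI r1
#define Reg0LO r0
#endif

With this indirection, the body of __i64_stof below assembles unchanged for both byte orders.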
Diffstat (limited to 'runtime/arm/i64_stof.S')
-rw-r--r--  runtime/arm/i64_stof.S  28
1 file changed, 14 insertions, 14 deletions
diff --git a/runtime/arm/i64_stof.S b/runtime/arm/i64_stof.S
index bb5e05c0..d8a250c8 100644
--- a/runtime/arm/i64_stof.S
+++ b/runtime/arm/i64_stof.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,11 +37,11 @@
#include "sysdeps.h"

@@@ Conversion from signed 64-bit integer to single float
-
+
FUNCTION(__i64_stof)
@ Check whether -2^53 <= X < 2^53
- ASR r2, r1, #21
- ASR r3, r1, #31 @ (r2,r3) = X >> 53
+ ASR r2, Reg0HI, #21
+ ASR r3, Reg0HI, #31 @ (r2,r3) = X >> 53
adds r2, r2, #1
adc r3, r3, #0 @ (r2,r3) = X >> 53 + 1
cmp r3, #2
@@ -49,29 +49,29 @@ FUNCTION(__i64_stof)
@ X is large enough that double rounding can occur.
@ Avoid it by nudging X away from the points where double rounding
@ occurs (the "round to odd" technique)
- MOV r2, #0x700
+ MOV r2, #0x700
ORR r2, r2, #0xFF @ r2 = 0x7FF
- AND r3, r0, r2 @ extract bits 0 to 11 of X
+ AND r3, Reg0LO, r2 @ extract bits 0 to 11 of X
ADD r3, r3, r2 @ r3 = (X & 0x7FF) + 0x7FF
@ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
@ bits 13-31 of r3 are 0
- ORR r0, r0, r3 @ correct bit number 12 of X
- BIC r0, r0, r2 @ set to 0 bits 0 to 11 of X
+ ORR Reg0LO, Reg0LO, r3 @ correct bit number 12 of X
+ BIC Reg0LO, Reg0LO, r2 @ set to 0 bits 0 to 11 of X
@ Convert to double
-1: vmov s0, r0
+1: vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
- vmov s2, r1
+ vmov s2, Reg0HI
vcvt.f64.s32 d1, s2 @ convert high half to double (signed)
vldr d2, .LC1 @ d2 = 2^32
vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
@ Round to single
vcvt.f32.f64 s0, d0
-#ifdef ABI_eabi
+#ifdef ABI_eabi
@ Return result in r0
- vmov r0, s0
-#endif
+ vmov r0, s0
+#endif
bx lr
ENDFUNCTION(__i64_stof)
-
+
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
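Taken together, the routine works as follows. The range test in the first hunk computes (X >> 53) + 1 and compares it against 2: the values in [-2^53, 2^53) are exactly those for which X >> 53 is -1 or 0, i.e. the int64 values that convert to double exactly. Outside that range the int64-to-double step already rounds, and the final double-to-float step would round a second time; collapsing the 11 low bits of X "to odd" into bit 11 beforehand makes the intermediate double exact, so only the final rounding remains. A C model of the technique, for illustration only (the name i64_stof_model is hypothetical; this is a sketch, not CompCert code):

#include <stdint.h>

float i64_stof_model(int64_t x)
{
    const int64_t TWO53 = (int64_t)1 << 53;   /* 2^53 */
    if (x < -TWO53 || x >= TWO53) {
        /* "Round to odd": if any of the 11 low bits is set, make
           bit 11 sticky, then clear bits 0 to 10.  The result has
           at most 53 significant bits, so it converts to double
           exactly and double rounding cannot occur. */
        if (x & 0x7FF)
            x |= 0x800;
        x &= ~(int64_t)0x7FF;
    }
    /* Recombine the halves as the assembly does with vcvt/vmla:
       low half as unsigned, high half as signed, hi * 2^32 + lo. */
    uint32_t lo = (uint32_t)(uint64_t)x;
    int32_t  hi = (int32_t)(uint32_t)((uint64_t)x >> 32);
    double d = (double)hi * 4294967296.0 + (double)lo;
    return (float)d;   /* single rounding to float */
}

Rounding to odd first and then to nearest gives the same result as one correctly rounded int64-to-float conversion, which is why the detour through double is safe.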