author    Bernhard Schommer <bernhardschommer@gmail.com>  2016-08-05 14:05:34 +0200
committer Bernhard Schommer <bernhardschommer@gmail.com>  2016-08-05 14:05:34 +0200
commit    028aaefc44b8ed8bafd8b8896fedb53f6e68df3c (patch)
tree      d7f9325da52050a64e5c9ced1017a4f57c674ff3 /runtime/arm
parent    4ac759d0bceef49d16197e3bb8c9767ece693c5e (diff)
Implement support for big-endian ARM targets.

Adds support for big-endian ARM targets by making the target endianness flag configurable, adding support for the big-endian calling conventions, rewriting memory access patterns, and adding big-endian versions of the runtime functions. Bug 19418
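On 32-bit ARM, the EABI passes a 64-bit integer in a register pair whose word order follows the target byte order, so every helper touched below must know which register of the pair holds the high word. A minimal C sketch of the layout difference (illustration only, not part of the commit; the function name is made up):

```c
#include <stdint.h>
#include <string.h>

/* On a 32-bit target, the word at the lower address of a 64-bit value
   is the low half on little-endian and the high half on big-endian.
   The EABI mirrors this order into the register pair (r0:r1). */
int high_word_first(void) {
    uint64_t x = 0x0123456789ABCDEFULL;
    uint32_t first;
    memcpy(&first, &x, sizeof first);   /* word at the lower address */
    return first == 0x01234567u;        /* 1 on big-endian, 0 on little-endian */
}
```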
Diffstat (limited to 'runtime/arm')
-rw-r--r--  runtime/arm/i64_dtos.S     66
-rw-r--r--  runtime/arm/i64_dtou.S     52
-rw-r--r--  runtime/arm/i64_sar.S      16
-rw-r--r--  runtime/arm/i64_sdiv.S     34
-rw-r--r--  runtime/arm/i64_shl.S      26
-rw-r--r--  runtime/arm/i64_shr.S      22
-rw-r--r--  runtime/arm/i64_smod.S     32
-rw-r--r--  runtime/arm/i64_stod.S     20
-rw-r--r--  runtime/arm/i64_stof.S     28
-rw-r--r--  runtime/arm/i64_udiv.S     12
-rw-r--r--  runtime/arm/i64_udivmod.S  66
-rw-r--r--  runtime/arm/i64_utod.S     22
-rw-r--r--  runtime/arm/i64_utof.S     24
-rw-r--r--  runtime/arm/sysdeps.h      40
-rw-r--r--  runtime/arm/vararg.S        6
15 files changed, 253 insertions, 213 deletions
diff --git a/runtime/arm/i64_dtos.S b/runtime/arm/i64_dtos.S
index 4557eeab..e31f3f34 100644
--- a/runtime/arm/i64_dtos.S
+++ b/runtime/arm/i64_dtos.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -34,19 +34,19 @@
@ Helper functions for 64-bit integer arithmetic. ARM version.
-#include "sysdeps.h"
+#include "sysdeps.h"
@@@ Conversion from double float to signed 64-bit integer
-
+
FUNCTION(__i64_dtos)
#ifndef ABI_eabi
- vmov r0, r1, d0
-#endif
- ASR r12, r1, #31 @ save sign of result in r12
+ vmov Reg0LO, Reg0HI, d0
+#endif
+ ASR r12, Reg0HI, #31 @ save sign of result in r12
@ extract unbiased exponent ((HI & 0x7FF00000) >> 20) - (1023 + 52) in r2
@ note: 1023 + 52 = 1075 = 1024 + 51
@ note: (HI & 0x7FF00000) >> 20 = (HI << 1) >> 21
- LSL r2, r1, #1
+ LSL r2, Reg0HI, #1
LSR r2, r2, #21
SUB r2, r2, #51
SUB r2, r2, #1024
@@ -56,45 +56,45 @@ FUNCTION(__i64_dtos)
cmp r2, #11 @ if EXP >= 63 - 52, |double| is >= 2^63
bge 2f
@ extract true mantissa
- BIC r1, r1, #0xFF000000
- BIC r1, r1, #0x00F00000 @ HI &= ~0xFFF00000
- ORR r1, r1, #0x00100000 @ HI |= 0x00100000
+ BIC Reg0HI, Reg0HI, #0xFF000000
+ BIC Reg0HI, Reg0HI, #0x00F00000 @ HI &= ~0xFFF00000
+ ORR Reg0HI, Reg0HI, #0x00100000 @ HI |= 0x00100000
@ shift it appropriately
cmp r2, #0
blt 3f
- @ EXP >= 0: shift left by EXP. Note that EXP < 12
+ @ EXP >= 0: shift left by EXP. Note that EXP < 12
rsb r3, r2, #32 @ r3 = 32 - amount
- LSL r1, r1, r2
- LSR r3, r0, r3
- ORR r1, r1, r3
- LSL r0, r0, r2
+ LSL Reg0HI, Reg0HI, r2
+ LSR r3, Reg0LO, r3
+ ORR Reg0HI, Reg0HI, r3
+ LSL Reg0LO, Reg0LO, r2
b 4f
- @ EXP < 0: shift right by -EXP. Note that -EXP <= 52 but can be >= 32
+ @ EXP < 0: shift right by -EXP. Note that -EXP <= 52 but can be >= 32
3: RSB r2, r2, #0 @ r2 = -EXP = shift amount
RSB r3, r2, #32 @ r3 = 32 - amount
- LSR r0, r0, r2
- LSL r3, r1, r3
- ORR r0, r0, r3
- SUB r3, r2, #32 @ r3 = amount - 32 (see i64_shr.s)
- LSR r3, r1, r3
- ORR r0, r0, r3
- LSR r1, r1, r2
+ LSR Reg0LO, Reg0LO, r2
+ LSL r3, Reg0HI, r3
+ ORR Reg0LO, Reg0LO, r3
+ SUB r3, r2, #32 @ r3 = amount - 32 (see i64_shr.s)
+ LSR r3, Reg0HI, r3
+ ORR Reg0LO, Reg0LO, r3
+ LSR Reg0HI, Reg0HI, r2
@ apply sign to result
-4: EOR r0, r0, r12
- EOR r1, r1, r12
- subs r0, r0, r12
- sbc r1, r1, r12
+4: EOR Reg0LO, Reg0LO, r12
+ EOR Reg0HI, Reg0HI, r12
+ subs Reg0LO, Reg0LO, r12
+ sbc Reg0HI, Reg0HI, r12
bx lr
@ special cases
-1: MOV r0, #0 @ result is 0
- MOV r1, #0
+1: MOV Reg0LO, #0 @ result is 0
+ MOV Reg0HI, #0
bx lr
2: cmp r12, #0
blt 6f
- mvn r0, #0 @ result is 0x7F....FF (MAX_SINT)
- LSR r1, r0, #1
+ mvn Reg0LO, #0 @ result is 0x7F....FF (MAX_SINT)
+ LSR Reg0HI, Reg0LO, #1
bx lr
-6: MOV r0, #0 @ result is 0x80....00 (MIN_SINT)
- MOV r1, #0x80000000
+6: MOV Reg0LO, #0 @ result is 0x80....00 (MIN_SINT)
+ MOV Reg0HI, #0x80000000
bx lr
ENDFUNCTION(__i64_dtos)
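A hedged C rendition of the conversion strategy visible in the hunks above; __i64_dtou below differs only in its sign handling. This is an illustrative sketch, not the verified runtime code, and it assumes >> on signed values is an arithmetic shift:

```c
#include <stdint.h>
#include <string.h>

int64_t dtos_sketch(double d) {
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);
    int64_t sign = (int64_t)bits >> 63;            /* 0 or -1, like r12 above */
    int exp = (int)((bits >> 52) & 0x7FF) - 1075;  /* unbiased exponent - 52 */
    if (exp < -52)                                 /* |d| < 1: truncates to 0 */
        return 0;
    if (exp >= 11)                                 /* |d| >= 2^63: saturate */
        return sign ? INT64_MIN : INT64_MAX;
    uint64_t mant = (bits & 0x000FFFFFFFFFFFFFULL) | (1ULL << 52);
    uint64_t mag = exp >= 0 ? mant << exp : mant >> -exp;
    return ((int64_t)mag ^ sign) - sign;           /* conditional negation */
}
```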
diff --git a/runtime/arm/i64_dtou.S b/runtime/arm/i64_dtou.S
index 57641909..6e47f3de 100644
--- a/runtime/arm/i64_dtou.S
+++ b/runtime/arm/i64_dtou.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -36,18 +36,18 @@
#include "sysdeps.h"
-@@@ Conversion from double float to unsigned 64-bit integer
+@@@ Conversion from double float to unsigned 64-bit integer
FUNCTION(__i64_dtou)
#ifndef ABI_eabi
- vmov r0, r1, d0
-#endif
- cmp r1, #0 @ is double < 0 ?
+ vmov Reg0LO, Reg0HI, d0
+#endif
+ cmp Reg0HI, #0 @ is double < 0 ?
blt 1f @ then it converts to 0
@ extract unbiased exponent ((HI & 0x7FF00000) >> 20) - (1023 + 52) in r2
@ note: 1023 + 52 = 1075 = 1024 + 51
@ note: (HI & 0x7FF00000) >> 20 = (HI << 1) >> 21
- LSL r2, r1, #1
+ LSL r2, Reg0HI, #1
LSR r2, r2, #21
SUB r2, r2, #51
SUB r2, r2, #1024
@@ -57,35 +57,35 @@ FUNCTION(__i64_dtou)
cmp r2, #12 @ if EXP >= 64 - 52, double is >= 2^64
bge 2f
@ extract true mantissa
- BIC r1, r1, #0xFF000000
- BIC r1, r1, #0x00F00000 @ HI &= ~0xFFF00000
- ORR r1, r1, #0x00100000 @ HI |= 0x00100000
+ BIC Reg0HI, Reg0HI, #0xFF000000
+ BIC Reg0HI, Reg0HI, #0x00F00000 @ HI &= ~0xFFF00000
+ ORR Reg0HI, Reg0HI, #0x00100000 @ HI |= 0x00100000
@ shift it appropriately
cmp r2, #0
blt 3f
- @ EXP >= 0: shift left by EXP. Note that EXP < 12
+ @ EXP >= 0: shift left by EXP. Note that EXP < 12
rsb r3, r2, #32 @ r3 = 32 - amount
- LSL r1, r1, r2
- LSR r3, r0, r3
- ORR r1, r1, r3
- LSL r0, r0, r2
+ LSL Reg0HI, Reg0HI, r2
+ LSR r3, Reg0LO, r3
+ ORR Reg0HI, Reg0HI, r3
+ LSL Reg0LO, Reg0LO, r2
bx lr
- @ EXP < 0: shift right by -EXP. Note that -EXP <= 52 but can be >= 32
+ @ EXP < 0: shift right by -EXP. Note that -EXP <= 52 but can be >= 32
3: RSB r2, r2, #0 @ r2 = -EXP = shift amount
RSB r3, r2, #32 @ r3 = 32 - amount
- LSR r0, r0, r2
- LSL r3, r1, r3
- ORR r0, r0, r3
- SUB r3, r2, #32 @ r3 = amount - 32 (see i64_shr.s)
- LSR r3, r1, r3
- ORR r0, r0, r3
- LSR r1, r1, r2
+ LSR Reg0LO, Reg0LO, r2
+ LSL r3, Reg0HI, r3
+ ORR Reg0LO, Reg0LO, r3
+ SUB r3, r2, #32 @ r3 = amount - 32 (see i64_shr.s)
+ LSR r3, Reg0HI, r3
+ ORR Reg0LO, Reg0LO, r3
+ LSR Reg0HI, Reg0HI, r2
bx lr
@ special cases
-1: MOV r0, #0 @ result is 0
- MOV r1, #0
+1: MOV Reg0LO, #0 @ result is 0
+ MOV Reg0HI, #0
bx lr
-2: mvn r0, #0 @ result is 0xFF....FF (MAX_UINT)
- MOV r1, r0
+2: mvn Reg0LO, #0 @ result is 0xFF....FF (MAX_UINT)
+ MOV Reg0HI, Reg0LO
bx lr
ENDFUNCTION(__i64_dtou)
diff --git a/runtime/arm/i64_sar.S b/runtime/arm/i64_sar.S
index a4d0a1df..dcaff1ac 100644
--- a/runtime/arm/i64_sar.S
+++ b/runtime/arm/i64_sar.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -42,16 +42,16 @@ FUNCTION(__i64_sar)
AND r2, r2, #63 @ normalize amount to 0...63
rsbs r3, r2, #32 @ r3 = 32 - amount
ble 1f @ branch if <= 0, namely if amount >= 32
- LSR r0, r0, r2
- LSL r3, r1, r3
- ORR r0, r0, r3
- ASR r1, r1, r2
+ LSR Reg0LO, Reg0LO, r2
+ LSL r3, Reg0HI, r3
+ ORR Reg0LO, Reg0LO, r3
+ ASR Reg0HI, Reg0HI, r2
bx lr
1:
SUB r2, r2, #32
- ASR r0, r1, r2
- ASR r1, r1, #31
+ ASR Reg0LO, Reg0HI, r2
+ ASR Reg0HI, Reg0HI, #31
bx lr
ENDFUNCTION(__i64_sar)
-
+
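The same two-word arithmetic shift in hedged C. The assembly gets the n = 0 and n >= 32 cases for free from ARM's shift-by-register semantics; C shifts by 32 or more are undefined, so the sketch branches explicitly (arithmetic >> on signed values assumed):

```c
#include <stdint.h>

int64_t sar_sketch(uint32_t lo, int32_t hi, unsigned n) {
    n &= 63;                       /* normalize amount to 0..63, as above */
    uint32_t rlo;
    int32_t  rhi;
    if (n < 32) {
        rlo = n ? (lo >> n) | ((uint32_t)hi << (32 - n)) : lo;
        rhi = hi >> n;             /* sign bits shift in from the top */
    } else {
        rlo = (uint32_t)(hi >> (n - 32));
        rhi = hi >> 31;            /* result high word is all sign bits */
    }
    return (int64_t)(((uint64_t)(uint32_t)rhi << 32) | rlo);
}
```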
diff --git a/runtime/arm/i64_sdiv.S b/runtime/arm/i64_sdiv.S
index dd88c12a..358312da 100644
--- a/runtime/arm/i64_sdiv.S
+++ b/runtime/arm/i64_sdiv.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -36,26 +36,26 @@
#include "sysdeps.h"
-@@@ Signed division
-
+@@@ Signed division
+
FUNCTION(__i64_sdiv)
push {r4, r5, r6, r7, r8, r10, lr}
- ASR r4, r1, #31 @ r4 = sign of N
- ASR r5, r3, #31 @ r5 = sign of D
+ ASR r4, Reg0HI, #31 @ r4 = sign of N
+ ASR r5, Reg1HI, #31 @ r5 = sign of D
EOR r10, r4, r5 @ r10 = sign of result
- EOR r0, r0, r4 @ take absolute value of N
- EOR r1, r1, r4 @ N = (N ^ (N >>s 31)) - (N >>s 31)
- subs r0, r0, r4
- sbc r1, r1, r4
- EOR r2, r2, r5 @ take absolute value of D
- EOR r3, r3, r5
- subs r2, r2, r5
- sbc r3, r3, r5
+ EOR Reg0LO, Reg0LO, r4 @ take absolute value of N
+ EOR Reg0HI, Reg0HI, r4 @ N = (N ^ (N >>s 31)) - (N >>s 31)
+ subs Reg0LO, Reg0LO, r4
+ sbc Reg0HI, Reg0HI, r4
+ EOR Reg1LO, Reg1LO, r5 @ take absolute value of D
+ EOR Reg1HI, Reg1HI, r5
+ subs Reg1LO, Reg1LO, r5
+ sbc Reg1HI, Reg1HI, r5
bl __i64_udivmod @ do unsigned division
- EOR r0, r4, r10 @ apply expected sign
- EOR r1, r5, r10
- subs r0, r0, r10
- sbc r1, r1, r10
+ EOR Reg0LO, Reg2LO, r10 @ apply expected sign
+ EOR Reg0HI, Reg2HI, r10
+ subs Reg0LO, Reg0LO, r10
+ sbc Reg0HI, Reg0HI, r10
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
ENDFUNCTION(__i64_sdiv)
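The sign-handling scheme in hedged C. udiv64 is a hypothetical stand-in for the quotient half of __i64_udivmod, used only for this sketch; the branch-free (x ^ s) - s negation is the same trick as the EOR/subs/sbc sequences above (arithmetic >> on signed values assumed):

```c
#include <stdint.h>

uint64_t udiv64(uint64_t n, uint64_t d);     /* hypothetical unsigned divide */

int64_t sdiv_sketch(int64_t n, int64_t d) {
    uint64_t sn = (uint64_t)(n >> 63);       /* sign masks: 0 or ~0 */
    uint64_t sd = (uint64_t)(d >> 63);
    uint64_t un = ((uint64_t)n ^ sn) - sn;   /* |n| (wraps correctly at INT64_MIN) */
    uint64_t ud = ((uint64_t)d ^ sd) - sd;   /* |d| */
    uint64_t s  = sn ^ sd;                   /* sign of the quotient */
    return (int64_t)((udiv64(un, ud) ^ s) - s);
}
```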
diff --git a/runtime/arm/i64_shl.S b/runtime/arm/i64_shl.S
index 66569d34..2b558cfe 100644
--- a/runtime/arm/i64_shl.S
+++ b/runtime/arm/i64_shl.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -34,38 +34,38 @@
@ Helper functions for 64-bit integer arithmetic. ARM version.
-#include "sysdeps.h"
+#include "sysdeps.h"
-@@@ Shift left
+@@@ Shift left
@ Note on ARM shifts: the shift amount is taken modulo 256.
@ If shift amount mod 256 >= 32, the shift produces 0.
@ Algorithm:
@ RH = (XH << N) | (XL >> (32-N)) | (XL << (N-32))
-@ RL = XL << N
+@ RL = XL << N
@ If N = 0:
@ RH = XH | 0 | 0
@ RL = XL
@ If 1 <= N <= 31: 1 <= 32-N <= 31 and 225 <= N-32 mod 256 <= 255
@ RH = (XH << N) | (XL >> (32-N)) | 0
-@ RL = XL << N
+@ RL = XL << N
@ If N = 32:
@ RH = 0 | XL | 0
@ RL = 0
@ If 33 <= N <= 63: 225 <= 32-N mod 256 <= 255 and 1 <= N-32 <= 31
@ RH = 0 | 0 | (XL << (N-32))
@ RL = 0
-
+
FUNCTION(__i64_shl)
AND r2, r2, #63 @ normalize amount to 0...63
RSB r3, r2, #32 @ r3 = 32 - amount
- LSL r1, r1, r2
- LSR r3, r0, r3
- ORR r1, r1, r3
- SUB r3, r2, #32 @ r3 = amount - 32
- LSL r3, r0, r3
- ORR r1, r1, r3
- LSL r0, r0, r2
+ LSL Reg0HI, Reg0HI, r2
+ LSR r3, Reg0LO, r3
+ ORR Reg0HI, Reg0HI, r3
+ SUB r3, r2, #32 @ r3 = amount - 32
+ LSL r3, Reg0LO, r3
+ ORR Reg0HI, Reg0HI, r3
+ LSL Reg0LO, Reg0LO, r2
bx lr
ENDFUNCTION(__i64_shl)
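The case analysis in the comment block above collapses into straight-line code because ARM register-specified shifts take the amount mod 256 and yield 0 from 32 upward. C shifts of a 32-bit value by 32 or more are undefined, so this hedged sketch models the ARM semantics with helpers (__i64_shr below is the mirror image):

```c
#include <stdint.h>

/* ARM-style 32-bit shifts: amount taken mod 256, result 0 once >= 32. */
static uint32_t lsl_arm(uint32_t x, uint32_t n) { return (n & 255) < 32 ? x << (n & 255) : 0; }
static uint32_t lsr_arm(uint32_t x, uint32_t n) { return (n & 255) < 32 ? x >> (n & 255) : 0; }

uint64_t shl_sketch(uint32_t lo, uint32_t hi, uint32_t n) {
    n &= 63;
    /* All three partial terms are computed unconditionally; for any
       amount 0..63 the out-of-range shifts contribute 0, exactly the
       case analysis of the comment block above. */
    uint32_t rhi = lsl_arm(hi, n) | lsr_arm(lo, 32 - n) | lsl_arm(lo, n - 32);
    uint32_t rlo = lsl_arm(lo, n);
    return (uint64_t)rhi << 32 | rlo;
}
```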
diff --git a/runtime/arm/i64_shr.S b/runtime/arm/i64_shr.S
index a5418f4a..43325092 100644
--- a/runtime/arm/i64_shr.S
+++ b/runtime/arm/i64_shr.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -36,20 +36,20 @@
#include "sysdeps.h"
-@@@ Shift right unsigned
+@@@ Shift right unsigned
@ Note on ARM shifts: the shift amount is taken modulo 256.
@ If shift amount mod 256 >= 32, the shift produces 0.
@ Algorithm:
@ RL = (XL >> N) | (XH << (32-N)) | (XH >> (N-32))
-@ RH = XH >> N
+@ RH = XH >> N
@ If N = 0:
@ RL = XL | 0 | 0
@ RH = XH
@ If 1 <= N <= 31: 1 <= 32-N <= 31 and 225 <= N-32 mod 256 <= 255
@ RL = (XL >> N) | (XH << (32-N)) | 0
-@ RH = XH >> N
+@ RH = XH >> N
@ If N = 32:
@ RL = 0 | XH | 0
@ RH = 0
@@ -60,12 +60,12 @@
FUNCTION(__i64_shr)
AND r2, r2, #63 @ normalize amount to 0...63
RSB r3, r2, #32 @ r3 = 32 - amount
- LSR r0, r0, r2
- LSL r3, r1, r3
- ORR r0, r0, r3
- SUB r3, r2, #32 @ r3 = amount - 32
- LSR r3, r1, r3
- ORR r0, r0, r3
- LSR r1, r1, r2
+ LSR Reg0LO, Reg0LO, r2
+ LSL r3, Reg0HI, r3
+ ORR Reg0LO, Reg0LO, r3
+ SUB r3, r2, #32 @ r3 = amount - 32
+ LSR r3, Reg0HI, r3
+ ORR Reg0LO, Reg0LO, r3
+ LSR Reg0HI, Reg0HI, r2
bx lr
ENDFUNCTION(__i64_shr)
diff --git a/runtime/arm/i64_smod.S b/runtime/arm/i64_smod.S
index b109ecc3..34c33c1c 100644
--- a/runtime/arm/i64_smod.S
+++ b/runtime/arm/i64_smod.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,25 +37,25 @@
#include "sysdeps.h"
@@@ Signed modulus
-
+
FUNCTION(__i64_smod)
push {r4, r5, r6, r7, r8, r10, lr}
- ASR r4, r1, #31 @ r4 = sign of N
- ASR r5, r3, #31 @ r5 = sign of D
+ ASR r4, Reg0HI, #31 @ r4 = sign of N
+ ASR r5, Reg1HI, #31 @ r5 = sign of D
MOV r10, r4 @ r10 = sign of result
- EOR r0, r0, r4 @ take absolute value of N
- EOR r1, r1, r4 @ N = (N ^ (N >>s 31)) - (N >>s 31)
- subs r0, r0, r4
- sbc r1, r1, r4
- EOR r2, r2, r5 @ take absolute value of D
- EOR r3, r3, r5
- subs r2, r2, r5
- sbc r3, r3, r5
+ EOR Reg0LO, Reg0LO, r4 @ take absolute value of N
+ EOR Reg0HI, Reg0HI, r4 @ N = (N ^ (N >>s 31)) - (N >>s 31)
+ subs Reg0LO, Reg0LO, r4
+ sbc Reg0HI, Reg0HI, r4
+ EOR Reg1LO, Reg1LO, r5 @ take absolute value of D
+ EOR Reg1HI, Reg1HI, r5
+ subs Reg1LO, Reg1LO, r5
+ sbc Reg1HI, Reg1HI, r5
bl __i64_udivmod @ do unsigned division
- EOR r0, r0, r10 @ apply expected sign
- EOR r1, r1, r10
- subs r0, r0, r10
- sbc r1, r1, r10
+ EOR Reg0LO, Reg0LO, r10 @ apply expected sign
+ EOR Reg0HI, Reg0HI, r10
+ subs Reg0LO, Reg0LO, r10
+ sbc Reg0HI, Reg0HI, r10
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
ENDFUNCTION(__i64_smod)
diff --git a/runtime/arm/i64_stod.S b/runtime/arm/i64_stod.S
index e38b466b..82ea9242 100644
--- a/runtime/arm/i64_stod.S
+++ b/runtime/arm/i64_stod.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,17 +37,17 @@
#include "sysdeps.h"
@@@ Conversion from signed 64-bit integer to double float
-
+
FUNCTION(__i64_stod)
__i64_stod:
- vmov s0, r0
- vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
- vmov s2, r1
- vcvt.f64.s32 d1, s2 @ convert high half to double (signed)
- vldr d2, .LC1 @ d2 = 2^32
- vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
-#ifdef ABI_eabi
- vmov r0, r1, d0 @ return result in r0, r1
+ vmov s0, Reg0LO
+ vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
+ vmov s2, Reg0HI
+ vcvt.f64.s32 d1, s2 @ convert high half to double (signed)
+ vldr d2, .LC1 @ d2 = 2^32
+ vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
+#ifdef ABI_eabi
+ vmov Reg0LO, Reg0HI, d0 @ return result in register pair r0:r1
#endif
bx lr
ENDFUNCTION(__i64_stod)
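The conversion scheme in hedged C: each 32-bit half converts to double exactly and the multiply by 2^32 is exact, so only the final addition rounds. That is why the two-halves-plus-vmla sequence above yields the correctly rounded double:

```c
#include <stdint.h>

double stod_sketch(uint32_t lo, int32_t hi) {
    /* hi * 2^32 is exact in double; the single rounding happens in the
       final addition, giving the correctly rounded int64 value. */
    return (double)hi * 4294967296.0 + (double)lo;   /* 4294967296 = 2^32 */
}
```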
diff --git a/runtime/arm/i64_stof.S b/runtime/arm/i64_stof.S
index bb5e05c0..d8a250c8 100644
--- a/runtime/arm/i64_stof.S
+++ b/runtime/arm/i64_stof.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,11 +37,11 @@
#include "sysdeps.h"
@@@ Conversion from signed 64-bit integer to single float
-
+
FUNCTION(__i64_stof)
@ Check whether -2^53 <= X < 2^53
- ASR r2, r1, #21
- ASR r3, r1, #31 @ (r2,r3) = X >> 53
+ ASR r2, Reg0HI, #21
+ ASR r3, Reg0HI, #31 @ (r2,r3) = X >> 53
adds r2, r2, #1
adc r3, r3, #0 @ (r2,r3) = X >> 53 + 1
cmp r3, #2
@@ -49,29 +49,29 @@ FUNCTION(__i64_stof)
@ X is large enough that double rounding can occur.
@ Avoid it by nudging X away from the points where double rounding
@ occurs (the "round to odd" technique)
- MOV r2, #0x700
+ MOV r2, #0x700
ORR r2, r2, #0xFF @ r2 = 0x7FF
- AND r3, r0, r2 @ extract bits 0 to 11 of X
+ AND r3, Reg0LO, r2 @ extract bits 0 to 11 of X
ADD r3, r3, r2 @ r3 = (X & 0x7FF) + 0x7FF
@ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
@ bits 13-31 of r3 are 0
- ORR r0, r0, r3 @ correct bit number 12 of X
- BIC r0, r0, r2 @ set to 0 bits 0 to 11 of X
+ ORR Reg0LO, Reg0LO, r3 @ correct bit number 12 of X
+ BIC Reg0LO, Reg0LO, r2 @ set to 0 bits 0 to 11 of X
@ Convert to double
-1: vmov s0, r0
+1: vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
- vmov s2, r1
+ vmov s2, Reg0HI
vcvt.f64.s32 d1, s2 @ convert high half to double (signed)
vldr d2, .LC1 @ d2 = 2^32
vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
@ Round to single
vcvt.f32.f64 s0, d0
-#ifdef ABI_eabi
+#ifdef ABI_eabi
@ Return result in r0
- vmov r0, s0
-#endif
+ vmov r0, s0
+#endif
bx lr
ENDFUNCTION(__i64_stof)
-
+
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
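The round-to-odd step in hedged C (arithmetic >> on signed values assumed; the 0x7FF constants match the hunk). Converting int64 to double and then to float can round twice; for magnitudes of 2^53 and up the sketch squashes the bits the double conversion would lose into one sticky bit, so the final rounding to float comes out right. The cast to double here denotes the two-halves-plus-2^32 conversion spelled out in the assembly:

```c
#include <stdint.h>

float stof_sketch(int64_t x) {
    if ((uint64_t)((x >> 53) + 1) >= 2) {           /* x outside [-2^53, 2^53) */
        uint64_t m = ((uint64_t)x & 0x7FF) + 0x7FF; /* bit 11 = OR of bits 0-10 */
        x = (int64_t)(((uint64_t)x | m) & ~(uint64_t)0x7FF);
    }
    return (float)(double)x;                        /* double rounding now harmless */
}
```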
diff --git a/runtime/arm/i64_udiv.S b/runtime/arm/i64_udiv.S
index 3b599442..316b7647 100644
--- a/runtime/arm/i64_udiv.S
+++ b/runtime/arm/i64_udiv.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -36,13 +36,13 @@
#include "sysdeps.h"
-@@@ Unsigned division
-
+@@@ Unsigned division
+
FUNCTION(__i64_udiv)
push {r4, r5, r6, r7, r8, lr}
bl __i64_udivmod
- MOV r0, r4
- MOV r1, r5
+ MOV Reg0LO, Reg2LO
+ MOV Reg0HI, Reg2HI
pop {r4, r5, r6, r7, r8, lr}
bx lr
-ENDFUNCTION(__i64_udiv)
+ENDFUNCTION(__i64_udiv)
diff --git a/runtime/arm/i64_udivmod.S b/runtime/arm/i64_udivmod.S
index e5373ad4..4ba99bc9 100644
--- a/runtime/arm/i64_udivmod.S
+++ b/runtime/arm/i64_udivmod.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -38,42 +38,42 @@
@@@ Auxiliary function for division and modulus. Don't call from C
-@ On entry: N = (r0, r1) numerator D = (r2, r3) divisor
-@ On exit: Q = (r4, r5) quotient R = (r0, r1) remainder
-@ Locals: M = (r6, r7) mask TMP = r8 temporary
-
+@ On entry: N = (r0, r1) numerator D = (r2, r3) divisor
+@ On exit: Q = (r4, r5) quotient R = (r0, r1) remainder
+@ Locals: M = (r6, r7) mask TMP = r8 temporary
+
FUNCTION(__i64_udivmod)
- orrs r8, r2, r3 @ is D == 0?
- it eq
- bxeq lr @ if so, return with unspecified results
- MOV r4, #0 @ Q = 0
- MOV r5, #0
- MOV r6, #1 @ M = 1
- MOV r7, #0
-1: cmp r3, #0 @ while ((signed) D >= 0) ...
+ orrs r8, Reg1LO, Reg1HI @ is D == 0?
+ it eq
+ bxeq lr @ if so, return with unspecified results
+ MOV Reg2LO, #0 @ Q = 0
+ MOV Reg2HI, #0
+ MOV Reg3LO, #1 @ M = 1
+ MOV Reg3HI, #0
+1: cmp Reg1HI, #0 @ while ((signed) D >= 0) ...
blt 2f
- subs r8, r0, r2 @ ... and N >= D ...
- sbcs r8, r1, r3
+ subs r8, Reg0LO, Reg1LO @ ... and N >= D ...
+ sbcs r8, Reg0HI, Reg1HI
blo 2f
- adds r2, r2, r2 @ D = D << 1
- adc r3, r3, r3
- adds r6, r6, r6 @ M = M << 1
- adc r7, r7, r7
+ adds Reg1LO, Reg1LO, Reg1LO @ D = D << 1
+ adc Reg1HI, Reg1HI, Reg1HI
+ adds Reg3LO, Reg3LO, Reg3LO @ M = M << 1
+ adc Reg3HI, Reg3HI, Reg3HI
b 1b
-2: subs r0, r0, r2 @ N = N - D
- sbcs r1, r1, r3
- orr r4, r4, r6 @ Q = Q | M
- orr r5, r5, r7
- bhs 3f @ if N was >= D, continue
- adds r0, r0, r2 @ otherwise, undo what we just did
- adc r1, r1, r3 @ N = N + D
- bic r4, r4, r6 @ Q = Q & ~M
- bic r5, r5, r7
-3: lsrs r7, r7, #1 @ M = M >> 1
- rrx r6, r6
- lsrs r3, r3, #1 @ D = D >> 1
- rrx r2, r2
- orrs r8, r6, r7 @ repeat while (M != 0) ...
+2: subs Reg0LO, Reg0LO, Reg1LO @ N = N - D
+ sbcs Reg0HI, Reg0HI, Reg1HI
+ orr Reg2LO, Reg2LO, Reg3LO @ Q = Q | M
+ orr Reg2HI, Reg2HI, Reg3HI
+ bhs 3f @ if N was >= D, continue
+ adds Reg0LO, Reg0LO, Reg1LO @ otherwise, undo what we just did
+ adc Reg0HI, Reg0HI, Reg1HI @ N = N + D
+ bic Reg2LO, Reg2LO, Reg3LO @ Q = Q & ~M
+ bic Reg2HI, Reg2HI, Reg3HI
+3: lsrs Reg3HI, Reg3HI, #1 @ M = M >> 1
+ rrx Reg3LO, Reg3LO
+ lsrs Reg1HI, Reg1HI, #1 @ D = D >> 1
+ rrx Reg1LO, Reg1LO
+ orrs r8, Reg3LO, Reg3HI @ repeat while (M != 0) ...
bne 2b
bx lr
ENDFUNCTION(__i64_udivmod)
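The loop above is classic restoring division. A hedged C transcription: the assembly leaves the quotient in Reg2 (r4:r5) and the remainder in Reg0 (r0:r1); the sketch uses out-parameters instead:

```c
#include <stdint.h>

void udivmod_sketch(uint64_t n, uint64_t d, uint64_t *q, uint64_t *r) {
    if (d == 0) return;                 /* result unspecified, as in the assembly */
    uint64_t quot = 0, mask = 1;
    while ((int64_t)d >= 0 && n >= d) { /* while D's top bit is clear and N >= D */
        d <<= 1;                        /* D = D << 1 */
        mask <<= 1;                     /* M = M << 1 */
    }
    do {
        if (n >= d) {                   /* the assembly subtracts, then undoes */
            n -= d;                     /* N = N - D */
            quot |= mask;               /* Q = Q | M */
        }
        mask >>= 1;                     /* M = M >> 1 */
        d >>= 1;                        /* D = D >> 1 */
    } while (mask != 0);
    *q = quot;
    *r = n;
}
```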
diff --git a/runtime/arm/i64_utod.S b/runtime/arm/i64_utod.S
index b4c30754..593f8543 100644
--- a/runtime/arm/i64_utod.S
+++ b/runtime/arm/i64_utod.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,17 +37,17 @@
#include "sysdeps.h"
@@@ Conversion from unsigned 64-bit integer to double float
-
+
FUNCTION(__i64_utod)
-__i64_stod:
- vmov s0, r0
- vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
- vmov s2, r1
- vcvt.f64.u32 d1, s2 @ convert high half to double (unsigned)
- vldr d2, .LC1 @ d2 = 2^32
- vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
-#ifdef ABI_eabi
- vmov r0, r1, d0 @ return result in r0, r1
+__i64_utod:
+ vmov s0, Reg0LO
+ vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
+ vmov s2, Reg0HI
+ vcvt.f64.u32 d1, s2 @ convert high half to double (unsigned)
+ vldr d2, .LC1 @ d2 = 2^32
+ vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
+#ifdef ABI_eabi
+ vmov Reg0LO, Reg0HI, d0 @ return result in register pair r0:r1
#endif
bx lr
ENDFUNCTION(__i64_utod)
diff --git a/runtime/arm/i64_utof.S b/runtime/arm/i64_utof.S
index fbd325c8..be0ecc6a 100644
--- a/runtime/arm/i64_utof.S
+++ b/runtime/arm/i64_utof.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -37,35 +37,35 @@
#include "sysdeps.h"
@@@ Conversion from unsigned 64-bit integer to single float
-
+
FUNCTION(__i64_utof)
@ Check whether X < 2^53
- lsrs r2, r1, #21 @ test if X >> 53 == 0
+ lsrs r2, Reg0HI, #21 @ test if X >> 53 == 0
beq 1f
@ X is large enough that double rounding can occur.
@ Avoid it by nudging X away from the points where double rounding
@ occurs (the "round to odd" technique)
- MOV r2, #0x700
+ MOV r2, #0x700
ORR r2, r2, #0xFF @ r2 = 0x7FF
- AND r3, r0, r2 @ extract bits 0 to 11 of X
+ AND r3, Reg0LO, r2 @ extract bits 0 to 11 of X
ADD r3, r3, r2 @ r3 = (X & 0x7FF) + 0x7FF
@ bit 12 of r3 is 0 if all low 12 bits of X are 0, 1 otherwise
@ bits 13-31 of r3 are 0
- ORR r0, r0, r3 @ correct bit number 12 of X
- BIC r0, r0, r2 @ set to 0 bits 0 to 11 of X
+ ORR Reg0LO, Reg0LO, r3 @ correct bit number 12 of X
+ BIC Reg0LO, Reg0LO, r2 @ set to 0 bits 0 to 11 of X
@ Convert to double
-1: vmov s0, r0
+1: vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
- vmov s2, r1
+ vmov s2, Reg0HI
vcvt.f64.u32 d1, s2 @ convert high half to double (unsigned)
vldr d2, .LC1 @ d2 = 2^32
vmla.f64 d0, d1, d2 @ d0 = d0 + d1 * d2 = double value of int64
@ Round to single
vcvt.f32.f64 s0, d0
-#ifdef ABI_eabi
+#ifdef ABI_eabi
@ Return result in r0
- vmov r0, s0
-#endif
+ vmov r0, s0
+#endif
bx lr
ENDFUNCTION(__i64_utof)
diff --git a/runtime/arm/sysdeps.h b/runtime/arm/sysdeps.h
index 3d6a702c..fd4ea61d 100644
--- a/runtime/arm/sysdeps.h
+++ b/runtime/arm/sysdeps.h
@@ -95,3 +95,43 @@ f:
.arch armv7
#endif
.fpu vfpv2
+
+
+
+// Endianness dependencies
+
+// Location of high and low word of first register pair (r0:r1)
+#ifdef ENDIANNESS_big
+#define Reg0HI r0
+#define Reg0LO r1
+#else
+#define Reg0HI r1
+#define Reg0LO r0
+#endif
+
+// Location of high and low word of second register pair (r2:r3)
+#ifdef ENDIANNESS_big
+#define Reg1HI r2
+#define Reg1LO r3
+#else
+#define Reg1HI r3
+#define Reg1LO r2
+#endif
+
+// Location of high and low word of third register pair (r4:r5)
+#ifdef ENDIANNESS_big
+#define Reg2HI r4
+#define Reg2LO r5
+#else
+#define Reg2HI r5
+#define Reg2LO r4
+#endif
+
+// Location of high and low word of fourth register pair (r6:r7)
+#ifdef ENDIANNESS_big
+#define Reg3HI r6
+#define Reg3LO r7
+#else
+#define Reg3HI r7
+#define Reg3LO r6
+#endif
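What the macro block encodes, restated as a hedged C sketch: under the EABI, a 64-bit value has the same word order in its register pair as in memory, so the index of the high word flips with ENDIANNESS_big exactly as above (get_hi and the word indices are illustrative names, not part of the commit):

```c
#include <stdint.h>
#include <string.h>

/* Same selection pattern as the sysdeps.h block above, applied to the
   index of the high word within a two-word view of a uint64_t. */
#ifdef ENDIANNESS_big
#define HI_WORD 0   /* high half at the lower address / even register */
#define LO_WORD 1
#else
#define HI_WORD 1
#define LO_WORD 0
#endif

static uint32_t get_hi(uint64_t x) {
    uint32_t w[2];
    memcpy(w, &x, sizeof w);   /* well-defined two-word view of the value */
    return w[HI_WORD];
}
```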
diff --git a/runtime/arm/vararg.S b/runtime/arm/vararg.S
index 5e319b8b..6f446ca8 100644
--- a/runtime/arm/vararg.S
+++ b/runtime/arm/vararg.S
@@ -17,7 +17,7 @@
@ * Neither the name of the <organization> nor the
@ names of its contributors may be used to endorse or promote products
@ derived from this software without specific prior written permission.
-@
+@
@ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
@ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -70,9 +70,9 @@ FUNCTION(__compcert_va_float64)
#ifdef ABI_eabi
ldr r0, [r1, #-8] @ load next argument and return it in r0,r1
ldr r1, [r1, #-4]
-#else
+#else
vldr d0, [r1, #-8] @ load next argument and return it in d0
-#endif
+#endif
bx lr
ENDFUNCTION(__compcert_va_float64)