author     Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
committer  Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
commit     4affb2b02e486681b39add0dbaf4f873a91885c8 (patch)
tree       06b045ee98934b00c341071758c9b905ecad057f /runtime/arm
parent     95ed4ea7df3e4b05d623afb9cb65f0eb2653361b (diff)
Prefixed runtime functions.
The runtime functions are now prefixed with __compcert_ in order to avoid potential clashes with the runtime/builtin functions of other compilers. Bug 22062
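
For illustration, a sketch of what the renaming means for compiled code (the C function below is hypothetical; only the helper names are taken from the diff): CompCert on 32-bit ARM expands 64-bit integer operations into calls to these runtime routines, so the symbol an object file references changes from __i64_sdiv to __compcert_i64_sdiv.

    /* illustration.c, a hedged sketch, not part of the commit */
    long long quotient(long long n, long long d)
    {
        return n / d;   /* on 32-bit ARM, lowered to a runtime call:
                           formerly __i64_sdiv, now __compcert_i64_sdiv */
    }
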
Diffstat (limited to 'runtime/arm')
-rw-r--r--  runtime/arm/i64_dtos.S     | 4
-rw-r--r--  runtime/arm/i64_dtou.S     | 4
-rw-r--r--  runtime/arm/i64_sar.S      | 4
-rw-r--r--  runtime/arm/i64_sdiv.S     | 6
-rw-r--r--  runtime/arm/i64_shl.S      | 4
-rw-r--r--  runtime/arm/i64_shr.S      | 4
-rw-r--r--  runtime/arm/i64_smod.S     | 6
-rw-r--r--  runtime/arm/i64_smulh.S    | 4
-rw-r--r--  runtime/arm/i64_stod.S     | 6
-rw-r--r--  runtime/arm/i64_stof.S     | 4
-rw-r--r--  runtime/arm/i64_udiv.S     | 6
-rw-r--r--  runtime/arm/i64_udivmod.S  | 4
-rw-r--r--  runtime/arm/i64_umod.S     | 6
-rw-r--r--  runtime/arm/i64_umulh.S    | 4
-rw-r--r--  runtime/arm/i64_utod.S     | 6
-rw-r--r--  runtime/arm/i64_utof.S     | 4
16 files changed, 38 insertions(+), 38 deletions(-)
diff --git a/runtime/arm/i64_dtos.S b/runtime/arm/i64_dtos.S
index e31f3f34..d633dfdf 100644
--- a/runtime/arm/i64_dtos.S
+++ b/runtime/arm/i64_dtos.S
@@ -38,7 +38,7 @@
@@@ Conversion from double float to signed 64-bit integer
-FUNCTION(__i64_dtos)
+FUNCTION(__compcert_i64_dtos)
#ifndef ABI_eabi
vmov Reg0LO, Reg0HI, d0
#endif
@@ -97,4 +97,4 @@ FUNCTION(__i64_dtos)
6: MOV Reg0LO, #0 @ result is 0x80....00 (MIN_SINT)
MOV Reg0HI, #0x80000000
bx lr
-ENDFUNCTION(__i64_dtos)
+ENDFUNCTION(__compcert_i64_dtos)
diff --git a/runtime/arm/i64_dtou.S b/runtime/arm/i64_dtou.S
index 6e47f3de..4fa3350b 100644
--- a/runtime/arm/i64_dtou.S
+++ b/runtime/arm/i64_dtou.S
@@ -38,7 +38,7 @@
@@@ Conversion from double float to unsigned 64-bit integer
-FUNCTION(__i64_dtou)
+FUNCTION(__compcert_i64_dtou)
#ifndef ABI_eabi
vmov Reg0LO, Reg0HI, d0
#endif
@@ -88,4 +88,4 @@ FUNCTION(__i64_dtou)
2: mvn Reg0LO, #0 @ result is 0xFF....FF (MAX_UINT)
MOV Reg0HI, Reg0LO
bx lr
-ENDFUNCTION(__i64_dtou)
+ENDFUNCTION(__compcert_i64_dtou)
diff --git a/runtime/arm/i64_sar.S b/runtime/arm/i64_sar.S
index dcaff1ac..d4412ea0 100644
--- a/runtime/arm/i64_sar.S
+++ b/runtime/arm/i64_sar.S
@@ -38,7 +38,7 @@
@@@ Shift right signed
-FUNCTION(__i64_sar)
+FUNCTION(__compcert_i64_sar)
AND r2, r2, #63 @ normalize amount to 0...63
rsbs r3, r2, #32 @ r3 = 32 - amount
ble 1f @ branch if <= 0, namely if amount >= 32
@@ -52,6 +52,6 @@ FUNCTION(__i64_sar)
ASR Reg0LO, Reg0HI, r2
ASR Reg0HI, Reg0HI, #31
bx lr
-ENDFUNCTION(__i64_sar)
+ENDFUNCTION(__compcert_i64_sar)
diff --git a/runtime/arm/i64_sdiv.S b/runtime/arm/i64_sdiv.S
index 358312da..24519e8f 100644
--- a/runtime/arm/i64_sdiv.S
+++ b/runtime/arm/i64_sdiv.S
@@ -38,7 +38,7 @@
@@@ Signed division
-FUNCTION(__i64_sdiv)
+FUNCTION(__compcert_i64_sdiv)
push {r4, r5, r6, r7, r8, r10, lr}
ASR r4, Reg0HI, #31 @ r4 = sign of N
ASR r5, Reg1HI, #31 @ r5 = sign of D
@@ -51,11 +51,11 @@ FUNCTION(__i64_sdiv)
EOR Reg1HI, Reg1HI, r5
subs Reg1LO, Reg1LO, r5
sbc Reg1HI, Reg1HI, r5
- bl __i64_udivmod @ do unsigned division
+ bl __compcert_i64_udivmod @ do unsigned division
EOR Reg0LO, Reg2LO, r10 @ apply expected sign
EOR Reg0HI, Reg2HI, r10
subs Reg0LO, Reg0LO, r10
sbc Reg0HI, Reg0HI, r10
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
-ENDFUNCTION(__i64_sdiv)
+ENDFUNCTION(__compcert_i64_sdiv)
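
The excerpt shows the sign-fixup scheme: take absolute values with an EOR/subtract pair driven by the sign masks in r4 and r5, divide unsigned via __compcert_i64_udivmod, then re-apply the expected sign held in r10. A minimal C rendering of that scheme (sdiv64 and udivmod64 are hypothetical names; the stand-in divmod uses native operators rather than the assembly's loop, and the signed right shift relies on the arithmetic-shift behavior of mainstream compilers):

    #include <stdint.h>

    /* Hypothetical stand-in for __compcert_i64_udivmod, using native
       operators instead of the shift-and-subtract loop in i64_udivmod.S. */
    static uint64_t udivmod64(uint64_t n, uint64_t d, uint64_t *rem)
    {
        *rem = n % d;
        return n / d;
    }

    int64_t sdiv64(int64_t n, int64_t d)
    {
        uint64_t sn = (uint64_t)(n >> 63);      /* sign mask of N (the ASR #31 pair) */
        uint64_t sd = (uint64_t)(d >> 63);      /* sign mask of D */
        uint64_t un = ((uint64_t)n ^ sn) - sn;  /* |N| via EOR + subtract */
        uint64_t ud = ((uint64_t)d ^ sd) - sd;  /* |D| */
        uint64_t rem;
        uint64_t q  = udivmod64(un, ud, &rem);
        uint64_t sq = sn ^ sd;                  /* expected sign of quotient (r10) */
        return (int64_t)((q ^ sq) - sq);        /* re-apply sign */
    }

i64_smod below uses the same wrapper, re-applying a sign to the remainder instead of the quotient.
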
diff --git a/runtime/arm/i64_shl.S b/runtime/arm/i64_shl.S
index 2b558cfe..cef5a766 100644
--- a/runtime/arm/i64_shl.S
+++ b/runtime/arm/i64_shl.S
@@ -57,7 +57,7 @@
@ RH = 0 | 0 | (XL << (N-32))
@ RL = 0
-FUNCTION(__i64_shl)
+FUNCTION(__compcert_i64_shl)
AND r2, r2, #63 @ normalize amount to 0...63
RSB r3, r2, #32 @ r3 = 32 - amount
LSL Reg0HI, Reg0HI, r2
@@ -68,4 +68,4 @@ FUNCTION(__i64_shl)
ORR Reg0HI, Reg0HI, r3
LSL Reg0LO, Reg0LO, r2
bx lr
-ENDFUNCTION(__i64_shl)
+ENDFUNCTION(__compcert_i64_shl)
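
The hunk-header comments spell out the N >= 32 case; together with the N < 32 case, the routine composes a 64-bit shift from 32-bit halves. A hedged C sketch (shl64 is a hypothetical name; the assembly handles both cases branch-free by exploiting ARM's treatment of register shift amounts, while C must branch to avoid undefined shifts of 32 or more). i64_shr just below mirrors this for right shifts, and i64_sar above does the same with sign propagation.

    #include <stdint.h>

    /* 64-bit left shift built from 32-bit halves, the job
       __compcert_i64_shl performs on ARM. */
    uint64_t shl64(uint32_t lo, uint32_t hi, unsigned n)
    {
        n &= 63;                                 /* normalize amount to 0..63 */
        uint32_t rlo, rhi;
        if (n == 0) {
            rlo = lo; rhi = hi;
        } else if (n < 32) {
            rhi = (hi << n) | (lo >> (32 - n));  /* bits crossing the halves */
            rlo = lo << n;
        } else {
            rhi = lo << (n - 32);                /* low half moves up entirely */
            rlo = 0;                             /* RL = 0, as the comment says */
        }
        return ((uint64_t)rhi << 32) | rlo;
    }
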
diff --git a/runtime/arm/i64_shr.S b/runtime/arm/i64_shr.S
index 43325092..0f75eb2b 100644
--- a/runtime/arm/i64_shr.S
+++ b/runtime/arm/i64_shr.S
@@ -57,7 +57,7 @@
@ RL = 0 | 0 | (XH >> (N-32))
@ RH = 0
-FUNCTION(__i64_shr)
+FUNCTION(__compcert_i64_shr)
AND r2, r2, #63 @ normalize amount to 0...63
RSB r3, r2, #32 @ r3 = 32 - amount
LSR Reg0LO, Reg0LO, r2
@@ -68,4 +68,4 @@ FUNCTION(__i64_shr)
ORR Reg0LO, Reg0LO, r3
LSR Reg0HI, Reg0HI, r2
bx lr
-ENDFUNCTION(__i64_shr)
+ENDFUNCTION(__compcert_i64_shr)
diff --git a/runtime/arm/i64_smod.S b/runtime/arm/i64_smod.S
index 34c33c1c..24a8f19d 100644
--- a/runtime/arm/i64_smod.S
+++ b/runtime/arm/i64_smod.S
@@ -38,7 +38,7 @@
@@@ Signed modulus
-FUNCTION(__i64_smod)
+FUNCTION(__compcert_i64_smod)
push {r4, r5, r6, r7, r8, r10, lr}
ASR r4, Reg0HI, #31 @ r4 = sign of N
ASR r5, Reg1HI, #31 @ r5 = sign of D
@@ -51,11 +51,11 @@ FUNCTION(__i64_smod)
EOR Reg1HI, Reg1HI, r5
subs Reg1LO, Reg1LO, r5
sbc Reg1HI, Reg1HI, r5
- bl __i64_udivmod @ do unsigned division
+ bl __compcert_i64_udivmod @ do unsigned division
EOR Reg0LO, Reg0LO, r10 @ apply expected sign
EOR Reg0HI, Reg0HI, r10
subs Reg0LO, Reg0LO, r10
sbc Reg0HI, Reg0HI, r10
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
-ENDFUNCTION(__i64_smod)
+ENDFUNCTION(__compcert_i64_smod)
diff --git a/runtime/arm/i64_smulh.S b/runtime/arm/i64_smulh.S
index 476f51ce..5f32ff61 100644
--- a/runtime/arm/i64_smulh.S
+++ b/runtime/arm/i64_smulh.S
@@ -43,7 +43,7 @@
@ - subtract X if Y < 0
@ - subtract Y if X < 0
-FUNCTION(__i64_smulh)
+FUNCTION(__compcert_i64_smulh)
push {r4, r5, r6, r7}
@@@ r7:r6 accumulate bits 95-32 of the full product
umull r4, r6, Reg0LO, Reg1LO @ r6 = high half of XL.YL product
@@ -74,4 +74,4 @@ FUNCTION(__i64_smulh)
mov Reg0HI, r6
pop {r4, r5, r6, r7}
bx lr
-ENDFUNCTION(__i64_smulh)
+ENDFUNCTION(__compcert_i64_smulh)
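
The comment lines give the correction identity: compute the unsigned high product, then subtract each operand whose partner is negative. A compact C sketch under that identity (umulh and smulh are hypothetical names; the helper mirrors the four-partial-product scheme of i64_umulh further down):

    #include <stdint.h>

    /* 64x64 -> high 64 bits, unsigned, via 32-bit partial products. */
    uint64_t umulh(uint64_t x, uint64_t y)
    {
        uint64_t xl = (uint32_t)x, xh = x >> 32;
        uint64_t yl = (uint32_t)y, yh = y >> 32;
        uint64_t mid  = xh * yl + ((xl * yl) >> 32);  /* bits 95..32, first pass */
        uint64_t mid2 = xl * yh + (uint32_t)mid;      /* second middle product */
        return xh * yh + (mid >> 32) + (mid2 >> 32);  /* bits 127..64 */
    }

    /* Signed high product: correct the unsigned one as the comments say,
       subtract X if Y < 0, subtract Y if X < 0. */
    int64_t smulh(int64_t x, int64_t y)
    {
        uint64_t h = umulh((uint64_t)x, (uint64_t)y);
        if (x < 0) h -= (uint64_t)y;
        if (y < 0) h -= (uint64_t)x;
        return (int64_t)h;
    }
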
diff --git a/runtime/arm/i64_stod.S b/runtime/arm/i64_stod.S
index 82ea9242..e4b220b4 100644
--- a/runtime/arm/i64_stod.S
+++ b/runtime/arm/i64_stod.S
@@ -38,8 +38,8 @@
@@@ Conversion from signed 64-bit integer to double float
-FUNCTION(__i64_stod)
-__i64_stod:
+FUNCTION(__compcert_i64_stod)
+__compcert_i64_stod:
vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
vmov s2, Reg0HI
@@ -50,7 +50,7 @@ __i64_stod:
vmov Reg0LO, Reg0HI, d0 @ return result in register pair r0:r1
#endif
bx lr
-ENDFUNCTION(__i64_stod)
+ENDFUNCTION(__compcert_i64_stod)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
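
The conversion splits the integer: the low half is converted as an unsigned 32-bit value, the high half as a signed one, and the two are recombined using the 2^32 constant stored at .LC1. A hedged C sketch (stod64 is a hypothetical name; both half-conversions and the multiply by 2^32 are exact, so the single rounding in the final addition yields a correctly rounded result). i64_utod below is the same routine with the high half converted unsigned.

    #include <stdint.h>

    /* Sketch of the recombination performed by __compcert_i64_stod. */
    double stod64(int64_t x)
    {
        uint32_t lo = (uint32_t)x;          /* converted with vcvt.f64.u32 */
        int32_t  hi = (int32_t)(x >> 32);   /* converted signed */
        return (double)hi * 4294967296.0    /* .LC1: 2^32 = 0x41f0000000000000 */
             + (double)lo;
    }
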
diff --git a/runtime/arm/i64_stof.S b/runtime/arm/i64_stof.S
index d8a250c8..bcfa471c 100644
--- a/runtime/arm/i64_stof.S
+++ b/runtime/arm/i64_stof.S
@@ -38,7 +38,7 @@
@@@ Conversion from signed 64-bit integer to single float
-FUNCTION(__i64_stof)
+FUNCTION(__compcert_i64_stof)
@ Check whether -2^53 <= X < 2^53
ASR r2, Reg0HI, #21
ASR r3, Reg0HI, #31 @ (r2,r3) = X >> 53
@@ -71,7 +71,7 @@ FUNCTION(__i64_stof)
vmov r0, s0
#endif
bx lr
-ENDFUNCTION(__i64_stof)
+ENDFUNCTION(__compcert_i64_stof)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
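
A note on the range test in the excerpt: integers with -2^53 <= X < 2^53 convert to double exactly, so going through double and then rounding to single performs only one rounding step. Outside that range (the path is not shown in this excerpt) the routine must pre-condition X so that the two-step conversion cannot double-round. A sketch of the fast path only (stof_small is a hypothetical name):

    #include <stdint.h>

    /* Valid only under the excerpt's guard: -2^53 <= x < 2^53. */
    float stof_small(int64_t x)
    {
        return (float)(double)x;   /* int-to-double is exact here, so the
                                      double-to-float step rounds once */
    }
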
diff --git a/runtime/arm/i64_udiv.S b/runtime/arm/i64_udiv.S
index 316b7647..91e4ec2a 100644
--- a/runtime/arm/i64_udiv.S
+++ b/runtime/arm/i64_udiv.S
@@ -38,11 +38,11 @@
@@@ Unsigned division
-FUNCTION(__i64_udiv)
+FUNCTION(__compcert_i64_udiv)
push {r4, r5, r6, r7, r8, lr}
- bl __i64_udivmod
+ bl __compcert_i64_udivmod
MOV Reg0LO, Reg2LO
MOV Reg0HI, Reg2HI
pop {r4, r5, r6, r7, r8, lr}
bx lr
-ENDFUNCTION(__i64_udiv)
+ENDFUNCTION(__compcert_i64_udiv)
diff --git a/runtime/arm/i64_udivmod.S b/runtime/arm/i64_udivmod.S
index 4ba99bc9..c9b11692 100644
--- a/runtime/arm/i64_udivmod.S
+++ b/runtime/arm/i64_udivmod.S
@@ -42,7 +42,7 @@
@ On exit: Q = (r4, r5) quotient R = (r0, r1) remainder
@ Locals: M = (r6, r7) mask TMP = r8 temporary
-FUNCTION(__i64_udivmod)
+FUNCTION(__compcert_i64_udivmod)
orrs r8, Reg1LO, Reg1HI @ is D == 0?
it eq
bxeq lr @ if so, return with unspecified results
@@ -76,4 +76,4 @@ FUNCTION(__i64_udivmod)
orrs r8, Reg3LO, Reg3HI @ repeat while (M != 0) ...
bne 2b
bx lr
-ENDFUNCTION(__i64_udivmod)
+ENDFUNCTION(__compcert_i64_udivmod)
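
The excerpt names the register roles (quotient in r4:r5, remainder in r0:r1, a mask M and a temporary as locals), the zero-divisor early exit, and the "repeat while (M != 0)" loop. A hedged C sketch of a classic shift-and-subtract divider with that structure (udivmod64_loop is a hypothetical name; the real loop's alignment details may differ). i64_udiv above and i64_umod below are thin wrappers around this routine, returning the quotient or the remainder respectively.

    #include <stdint.h>

    /* Shift-and-subtract division in the style of __compcert_i64_udivmod:
       M is a one-bit mask marking the quotient bit currently being decided;
       D and M are shifted up until D covers N, then one quotient bit is
       resolved per iteration until M runs out. */
    uint64_t udivmod64_loop(uint64_t n, uint64_t d, uint64_t *rem)
    {
        if (d == 0) {                    /* the assembly returns here with
                                            unspecified results */
            *rem = n;
            return 0;
        }
        uint64_t q = 0, m = 1;
        while (!(d >> 63) && d < n) {    /* align D (and M) under N */
            d <<= 1;
            m <<= 1;
        }
        while (m != 0) {                 /* "repeat while (M != 0)" */
            if (n >= d) {                /* this quotient bit is 1 */
                n -= d;
                q |= m;
            }
            d >>= 1;
            m >>= 1;
        }
        *rem = n;                        /* what is left of N is the remainder */
        return q;
    }
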
diff --git a/runtime/arm/i64_umod.S b/runtime/arm/i64_umod.S
index e59fd203..b6e56ab2 100644
--- a/runtime/arm/i64_umod.S
+++ b/runtime/arm/i64_umod.S
@@ -38,9 +38,9 @@
@@@ Unsigned remainder
-FUNCTION(__i64_umod)
+FUNCTION(__compcert_i64_umod)
push {r4, r5, r6, r7, r8, lr}
- bl __i64_udivmod @ remainder is already in r0,r1
+ bl __compcert_i64_udivmod @ remainder is already in r0,r1
pop {r4, r5, r6, r7, r8, lr}
bx lr
-ENDFUNCTION(__i64_umod)
+ENDFUNCTION(__compcert_i64_umod)
diff --git a/runtime/arm/i64_umulh.S b/runtime/arm/i64_umulh.S
index c14f0c6b..8a7bf1c8 100644
--- a/runtime/arm/i64_umulh.S
+++ b/runtime/arm/i64_umulh.S
@@ -40,7 +40,7 @@
@ X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
-FUNCTION(__i64_umulh)
+FUNCTION(__compcert_i64_umulh)
push {r4, r5, r6, r7}
@@@ r7:r6 accumulate bits 95-32 of the full product
umull r4, r6, Reg0LO, Reg1LO @ r6 = high half of XL.YL product
@@ -58,4 +58,4 @@ FUNCTION(__i64_umulh)
ADC Reg0HI, r6, r5
pop {r4, r5, r6, r7}
bx lr
-ENDFUNCTION(__i64_umulh)
+ENDFUNCTION(__compcert_i64_umulh)
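
The formula in this excerpt, X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL, is exactly the decomposition the umulh helper in the sketch after the i64_smulh diff implements. A small hand-checked test of that identity (it links against that earlier sketch; the values are worked out by hand):

    #include <assert.h>
    #include <stdint.h>

    uint64_t umulh(uint64_t x, uint64_t y);   /* from the sketch after the
                                                 i64_smulh diff above */

    /* (2^64 - 1)^2 = 2^128 - 2^65 + 1, so its high word is 2^64 - 2;
       2^31 * 2^31 = 2^62 fits entirely in the low word;
       2^32 * 2^32 = 2^64 puts exactly 1 in the high word. */
    int main(void)
    {
        assert(umulh(~0ull, ~0ull) == ~0ull - 1);
        assert(umulh(1ull << 31, 1ull << 31) == 0);
        assert(umulh(1ull << 32, 1ull << 32) == 1);
        return 0;
    }
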
diff --git a/runtime/arm/i64_utod.S b/runtime/arm/i64_utod.S
index 593f8543..af7bcc71 100644
--- a/runtime/arm/i64_utod.S
+++ b/runtime/arm/i64_utod.S
@@ -38,8 +38,8 @@
@@@ Conversion from unsigned 64-bit integer to double float
-FUNCTION(__i64_utod)
-__i64_utod:
+FUNCTION(__compcert_i64_utod)
+__compcert_i64_utod:
vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
vmov s2, Reg0HI
@@ -50,7 +50,7 @@ __i64_utod:
vmov Reg0LO, Reg0HI, d0 @ return result in register pair r0:r1
#endif
bx lr
-ENDFUNCTION(__i64_utod)
+ENDFUNCTION(__compcert_i64_utod)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
diff --git a/runtime/arm/i64_utof.S b/runtime/arm/i64_utof.S
index be0ecc6a..66b146a9 100644
--- a/runtime/arm/i64_utof.S
+++ b/runtime/arm/i64_utof.S
@@ -38,7 +38,7 @@
@@@ Conversion from unsigned 64-bit integer to single float
-FUNCTION(__i64_utof)
+FUNCTION(__compcert_i64_utof)
@ Check whether X < 2^53
lsrs r2, Reg0HI, #21 @ test if X >> 53 == 0
beq 1f
@@ -67,7 +67,7 @@ FUNCTION(__i64_utof)
vmov r0, s0
#endif
bx lr
-ENDFUNCTION(__i64_utof)
+ENDFUNCTION(__compcert_i64_utof)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision