author     Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
committer  Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
commit     4affb2b02e486681b39add0dbaf4f873a91885c8 (patch)
tree       06b045ee98934b00c341071758c9b905ecad057f /runtime
parent     95ed4ea7df3e4b05d623afb9cb65f0eb2653361b (diff)
Prefixed runtime functions.

The runtime functions are prefixed with __compcert_ in order to avoid potential clashes with the runtime/builtin functions of other compilers. Bug 22062.
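For reference, a minimal C sketch of the clash being avoided (the declarations below are illustrative, not part of the patch): before this change CompCert emitted calls to symbols such as __i64_shl, which another compiler's runtime could also define; the new prefix keeps the helpers in a CompCert-specific namespace.

    /* Sketch: why unprefixed runtime symbols are risky.  If some other
       runtime library also exports __i64_shl, the linker may resolve
       CompCert's helper calls to the foreign definition. */
    extern unsigned long long __i64_shl(unsigned long long x, unsigned amount);          /* old, clash-prone */
    extern unsigned long long __compcert_i64_shl(unsigned long long x, unsigned amount); /* new, namespaced  */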
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Makefile                 4
-rw-r--r--  runtime/arm/i64_dtos.S           4
-rw-r--r--  runtime/arm/i64_dtou.S           4
-rw-r--r--  runtime/arm/i64_sar.S            4
-rw-r--r--  runtime/arm/i64_sdiv.S           6
-rw-r--r--  runtime/arm/i64_shl.S            4
-rw-r--r--  runtime/arm/i64_shr.S            4
-rw-r--r--  runtime/arm/i64_smod.S           6
-rw-r--r--  runtime/arm/i64_smulh.S          4
-rw-r--r--  runtime/arm/i64_stod.S           6
-rw-r--r--  runtime/arm/i64_stof.S           4
-rw-r--r--  runtime/arm/i64_udiv.S           6
-rw-r--r--  runtime/arm/i64_udivmod.S        4
-rw-r--r--  runtime/arm/i64_umod.S           6
-rw-r--r--  runtime/arm/i64_umulh.S          4
-rw-r--r--  runtime/arm/i64_utod.S           6
-rw-r--r--  runtime/arm/i64_utof.S           4
-rw-r--r--  runtime/powerpc/i64_dtos.s       8
-rw-r--r--  runtime/powerpc/i64_dtou.s       8
-rw-r--r--  runtime/powerpc/i64_sar.s        8
-rw-r--r--  runtime/powerpc/i64_sdiv.s      10
-rw-r--r--  runtime/powerpc/i64_shl.s        8
-rw-r--r--  runtime/powerpc/i64_shr.s        8
-rw-r--r--  runtime/powerpc/i64_smod.s      10
-rw-r--r--  runtime/powerpc/i64_smulh.s      8
-rw-r--r--  runtime/powerpc/i64_stod.s       8
-rw-r--r--  runtime/powerpc/i64_stof.s      10
-rw-r--r--  runtime/powerpc/i64_udiv.s      10
-rw-r--r--  runtime/powerpc/i64_udivmod.s   18
-rw-r--r--  runtime/powerpc/i64_umod.s      10
-rw-r--r--  runtime/powerpc/i64_umulh.s      8
-rw-r--r--  runtime/powerpc/i64_utod.s       8
-rw-r--r--  runtime/powerpc/i64_utof.s      10
-rw-r--r--  runtime/powerpc64/i64_dtou.s     8
-rw-r--r--  runtime/powerpc64/i64_stof.s     8
-rw-r--r--  runtime/powerpc64/i64_utod.s     8
-rw-r--r--  runtime/powerpc64/i64_utof.s    10
-rw-r--r--  runtime/test/test_int64.c       68
-rw-r--r--  runtime/x86_32/i64_dtos.S        4
-rw-r--r--  runtime/x86_32/i64_dtou.S        4
-rw-r--r--  runtime/x86_32/i64_sar.S         4
-rw-r--r--  runtime/x86_32/i64_sdiv.S        6
-rw-r--r--  runtime/x86_32/i64_shl.S         4
-rw-r--r--  runtime/x86_32/i64_shr.S         4
-rw-r--r--  runtime/x86_32/i64_smod.S        6
-rw-r--r--  runtime/x86_32/i64_smulh.S       4
-rw-r--r--  runtime/x86_32/i64_stod.S        4
-rw-r--r--  runtime/x86_32/i64_stof.S        4
-rw-r--r--  runtime/x86_32/i64_udiv.S        6
-rw-r--r--  runtime/x86_32/i64_udivmod.S     4
-rw-r--r--  runtime/x86_32/i64_umod.S        6
-rw-r--r--  runtime/x86_32/i64_umulh.S       4
-rw-r--r--  runtime/x86_32/i64_utod.S        4
-rw-r--r--  runtime/x86_32/i64_utof.S        4
-rw-r--r--  runtime/x86_64/i64_dtou.S        4
-rw-r--r--  runtime/x86_64/i64_utod.S        4
-rw-r--r--  runtime/x86_64/i64_utof.S        4
57 files changed, 208 insertions(+), 208 deletions(-)
diff --git a/runtime/Makefile b/runtime/Makefile
index 213779a4..27ad6e8c 100644
--- a/runtime/Makefile
+++ b/runtime/Makefile
@@ -55,13 +55,13 @@ $(LIB): $(OBJS)
$(CASMRUNTIME) -DMODEL_$(MODEL) -DABI_$(ABI) -DENDIANNESS_$(ENDIANNESS) -DSYS_$(SYSTEM) -o $@ $^
# If no asm implementation available, compile the portable C implementation
-# with CompCert. Since CompCert rejects the "__i64_" identifiers, the C
+# with CompCert. Since CompCert rejects the "__compcert_i64_" identifiers, the C
# implementation uses "i64_" identifiers, and we rename them in the
# generated assembly
%.o: c/%.c c/i64.h ../ccomp
../ccomp -O2 -S -o $*.s -I./c c/$*.c
- sed -i -e 's/i64_/__i64_/g' $*.s
+ sed -i -e 's/i64_/__compcert_i64_/g' $*.s
$(CASMRUNTIME) -o $*.o $*.s
@rm -f $*.s
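A sketch of why the sed pass exists (function name and body are illustrative, following the convention the comment above describes for c/i64.h): CompCert rejects identifiers carrying its own reserved prefix, so the portable C sources cannot use the final names directly.

    /* The portable C implementation must be written without the
       reserved prefix, e.g.: */
    unsigned long long i64_shl(unsigned long long x, unsigned amount)
    {
      return x << (amount & 63);   /* illustrative body only */
    }
    /* "ccomp -S" then emits assembly defining the symbol i64_shl, and
       sed -e 's/i64_/__compcert_i64_/g' renames every occurrence to
       __compcert_i64_shl before the final assembly step. */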
diff --git a/runtime/arm/i64_dtos.S b/runtime/arm/i64_dtos.S
index e31f3f34..d633dfdf 100644
--- a/runtime/arm/i64_dtos.S
+++ b/runtime/arm/i64_dtos.S
@@ -38,7 +38,7 @@
@@@ Conversion from double float to signed 64-bit integer
-FUNCTION(__i64_dtos)
+FUNCTION(__compcert_i64_dtos)
#ifndef ABI_eabi
vmov Reg0LO, Reg0HI, d0
#endif
@@ -97,4 +97,4 @@ FUNCTION(__i64_dtos)
6: MOV Reg0LO, #0 @ result is 0x80....00 (MIN_SINT)
MOV Reg0HI, #0x80000000
bx lr
-ENDFUNCTION(__i64_dtos)
+ENDFUNCTION(__compcert_i64_dtos)
diff --git a/runtime/arm/i64_dtou.S b/runtime/arm/i64_dtou.S
index 6e47f3de..4fa3350b 100644
--- a/runtime/arm/i64_dtou.S
+++ b/runtime/arm/i64_dtou.S
@@ -38,7 +38,7 @@
@@@ Conversion from double float to unsigned 64-bit integer
-FUNCTION(__i64_dtou)
+FUNCTION(__compcert_i64_dtou)
#ifndef ABI_eabi
vmov Reg0LO, Reg0HI, d0
#endif
@@ -88,4 +88,4 @@ FUNCTION(__i64_dtou)
2: mvn Reg0LO, #0 @ result is 0xFF....FF (MAX_UINT)
MOV Reg0HI, Reg0LO
bx lr
-ENDFUNCTION(__i64_dtou)
+ENDFUNCTION(__compcert_i64_dtou)
diff --git a/runtime/arm/i64_sar.S b/runtime/arm/i64_sar.S
index dcaff1ac..d4412ea0 100644
--- a/runtime/arm/i64_sar.S
+++ b/runtime/arm/i64_sar.S
@@ -38,7 +38,7 @@
@@@ Shift right signed
-FUNCTION(__i64_sar)
+FUNCTION(__compcert_i64_sar)
AND r2, r2, #63 @ normalize amount to 0...63
rsbs r3, r2, #32 @ r3 = 32 - amount
ble 1f @ branch if <= 0, namely if amount >= 32
@@ -52,6 +52,6 @@ FUNCTION(__i64_sar)
ASR Reg0LO, Reg0HI, r2
ASR Reg0HI, Reg0HI, #31
bx lr
-ENDFUNCTION(__i64_sar)
+ENDFUNCTION(__compcert_i64_sar)
diff --git a/runtime/arm/i64_sdiv.S b/runtime/arm/i64_sdiv.S
index 358312da..24519e8f 100644
--- a/runtime/arm/i64_sdiv.S
+++ b/runtime/arm/i64_sdiv.S
@@ -38,7 +38,7 @@
@@@ Signed division
-FUNCTION(__i64_sdiv)
+FUNCTION(__compcert_i64_sdiv)
push {r4, r5, r6, r7, r8, r10, lr}
ASR r4, Reg0HI, #31 @ r4 = sign of N
ASR r5, Reg1HI, #31 @ r5 = sign of D
@@ -51,11 +51,11 @@ FUNCTION(__i64_sdiv)
EOR Reg1HI, Reg1HI, r5
subs Reg1LO, Reg1LO, r5
sbc Reg1HI, Reg1HI, r5
- bl __i64_udivmod @ do unsigned division
+ bl __compcert_i64_udivmod @ do unsigned division
EOR Reg0LO, Reg2LO, r10 @ apply expected sign
EOR Reg0HI, Reg2HI, r10
subs Reg0LO, Reg0LO, r10
sbc Reg0HI, Reg0HI, r10
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
-ENDFUNCTION(__i64_sdiv)
+ENDFUNCTION(__compcert_i64_sdiv)
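The ARM and PowerPC versions both reduce signed division to the unsigned __compcert_i64_udivmod with the xor/subtract sign trick visible above. A C sketch of the scheme (i64_sdiv_sketch is a name invented here; the / operator stands in for the udivmod call; assumes arithmetic right shift, as on these targets):

    long long i64_sdiv_sketch(long long n, long long d)
    {
      long long sn = n >> 63;               /* 0 if n >= 0, -1 if n < 0 */
      long long sd = d >> 63;               /* sign of the divisor */
      long long sq = sn ^ sd;               /* expected sign of quotient */
      unsigned long long un = ((unsigned long long)n ^ sn) - sn;  /* |n| */
      unsigned long long ud = ((unsigned long long)d ^ sd) - sd;  /* |d| */
      unsigned long long q = un / ud;       /* __compcert_i64_udivmod */
      return ((long long)q ^ sq) - sq;      /* apply expected sign */
    }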
diff --git a/runtime/arm/i64_shl.S b/runtime/arm/i64_shl.S
index 2b558cfe..cef5a766 100644
--- a/runtime/arm/i64_shl.S
+++ b/runtime/arm/i64_shl.S
@@ -57,7 +57,7 @@
@ RH = 0 | 0 | (XL << (N-32))
@ RL = 0
-FUNCTION(__i64_shl)
+FUNCTION(__compcert_i64_shl)
AND r2, r2, #63 @ normalize amount to 0...63
RSB r3, r2, #32 @ r3 = 32 - amount
LSL Reg0HI, Reg0HI, r2
@@ -68,4 +68,4 @@ FUNCTION(__i64_shl)
ORR Reg0HI, Reg0HI, r3
LSL Reg0LO, Reg0LO, r2
bx lr
-ENDFUNCTION(__i64_shl)
+ENDFUNCTION(__compcert_i64_shl)
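The two-word shift described in the header comment, written as a C sketch (shl64 is a name invented here; assumes 32-bit unsigned int; the ARM code needs no case split because its shifters return 0 for amounts of 32 or more, whereas C must avoid shifting by the full width):

    void shl64(unsigned *hi, unsigned *lo, unsigned n)
    {
      n &= 63;                        /* normalize amount to 0..63 */
      if (n == 0) return;
      if (n < 32) {
        *hi = (*hi << n) | (*lo >> (32 - n));
        *lo = *lo << n;
      } else {
        *hi = *lo << (n - 32);        /* low half moves into high half */
        *lo = 0;
      }
    }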
diff --git a/runtime/arm/i64_shr.S b/runtime/arm/i64_shr.S
index 43325092..0f75eb2b 100644
--- a/runtime/arm/i64_shr.S
+++ b/runtime/arm/i64_shr.S
@@ -57,7 +57,7 @@
@ RL = 0 | 0 | (XH >> (N-32))
@ RH = 0
-FUNCTION(__i64_shr)
+FUNCTION(__compcert_i64_shr)
AND r2, r2, #63 @ normalize amount to 0...63
RSB r3, r2, #32 @ r3 = 32 - amount
LSR Reg0LO, Reg0LO, r2
@@ -68,4 +68,4 @@ FUNCTION(__i64_shr)
ORR Reg0LO, Reg0LO, r3
LSR Reg0HI, Reg0HI, r2
bx lr
-ENDFUNCTION(__i64_shr)
+ENDFUNCTION(__compcert_i64_shr)
diff --git a/runtime/arm/i64_smod.S b/runtime/arm/i64_smod.S
index 34c33c1c..24a8f19d 100644
--- a/runtime/arm/i64_smod.S
+++ b/runtime/arm/i64_smod.S
@@ -38,7 +38,7 @@
@@@ Signed modulus
-FUNCTION(__i64_smod)
+FUNCTION(__compcert_i64_smod)
push {r4, r5, r6, r7, r8, r10, lr}
ASR r4, Reg0HI, #31 @ r4 = sign of N
ASR r5, Reg1HI, #31 @ r5 = sign of D
@@ -51,11 +51,11 @@ FUNCTION(__i64_smod)
EOR Reg1HI, Reg1HI, r5
subs Reg1LO, Reg1LO, r5
sbc Reg1HI, Reg1HI, r5
- bl __i64_udivmod @ do unsigned division
+ bl __compcert_i64_udivmod @ do unsigned division
EOR Reg0LO, Reg0LO, r10 @ apply expected sign
EOR Reg0HI, Reg0HI, r10
subs Reg0LO, Reg0LO, r10
sbc Reg0HI, Reg0HI, r10
pop {r4, r5, r6, r7, r8, r10, lr}
bx lr
-ENDFUNCTION(__i64_smod)
+ENDFUNCTION(__compcert_i64_smod)
diff --git a/runtime/arm/i64_smulh.S b/runtime/arm/i64_smulh.S
index 476f51ce..5f32ff61 100644
--- a/runtime/arm/i64_smulh.S
+++ b/runtime/arm/i64_smulh.S
@@ -43,7 +43,7 @@
@ - subtract X if Y < 0
@ - subtract Y if X < 0
-FUNCTION(__i64_smulh)
+FUNCTION(__compcert_i64_smulh)
push {r4, r5, r6, r7}
@@@ r7:r6 accumulate bits 95-32 of the full product
umull r4, r6, Reg0LO, Reg1LO @ r6 = high half of XL.YL product
@@ -74,4 +74,4 @@ FUNCTION(__i64_smulh)
mov Reg0HI, r6
pop {r4, r5, r6, r7}
bx lr
-ENDFUNCTION(__i64_smulh)
+ENDFUNCTION(__compcert_i64_smulh)
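Both smulh implementations accumulate the 128-bit product from four 32x32 partial products, then correct for signs: since a signed value equals its unsigned reading minus 2^64 when negative, the signed high half is the unsigned one minus Y when X < 0 and minus X when Y < 0. A C sketch (smulh is a name invented here):

    long long smulh(long long x, long long y)
    {
      unsigned long long xl = (unsigned)x, xh = (unsigned long long)x >> 32;
      unsigned long long yl = (unsigned)y, yh = (unsigned long long)y >> 32;
      unsigned long long ll = xl * yl;
      unsigned long long lh = xl * yh;
      unsigned long long hl = xh * yl;
      unsigned long long hh = xh * yh;
      /* bits 127..32 of the full unsigned product, carries propagated */
      unsigned long long mid  = (ll >> 32) + (unsigned)lh + (unsigned)hl;
      unsigned long long high = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
      if (x < 0) high -= (unsigned long long)y;   /* signed correction */
      if (y < 0) high -= (unsigned long long)x;
      return (long long)high;
    }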
diff --git a/runtime/arm/i64_stod.S b/runtime/arm/i64_stod.S
index 82ea9242..e4b220b4 100644
--- a/runtime/arm/i64_stod.S
+++ b/runtime/arm/i64_stod.S
@@ -38,8 +38,8 @@
@@@ Conversion from signed 64-bit integer to double float
-FUNCTION(__i64_stod)
-__i64_stod:
+FUNCTION(__compcert_i64_stod)
+__compcert_i64_stod:
vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
vmov s2, Reg0HI
@@ -50,7 +50,7 @@ __i64_stod:
vmov Reg0LO, Reg0HI, d0 @ return result in register pair r0:r1
#endif
bx lr
-ENDFUNCTION(__i64_stod)
+ENDFUNCTION(__compcert_i64_stod)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
diff --git a/runtime/arm/i64_stof.S b/runtime/arm/i64_stof.S
index d8a250c8..bcfa471c 100644
--- a/runtime/arm/i64_stof.S
+++ b/runtime/arm/i64_stof.S
@@ -38,7 +38,7 @@
@@@ Conversion from signed 64-bit integer to single float
-FUNCTION(__i64_stof)
+FUNCTION(__compcert_i64_stof)
@ Check whether -2^53 <= X < 2^53
ASR r2, Reg0HI, #21
ASR r3, Reg0HI, #31 @ (r2,r3) = X >> 53
@@ -71,7 +71,7 @@ FUNCTION(__i64_stof)
vmov r0, s0
#endif
bx lr
-ENDFUNCTION(__i64_stof)
+ENDFUNCTION(__compcert_i64_stof)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
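The range check above guards against double rounding: for |X| >= 2^53 the int64-to-double conversion already rounds, and rounding again to single could be off by one ulp. Following the comments in the PowerPC version later in this patch ("correct bit number 12", "set to 0 bits 0 to 11"), the fix squashes the 11 low bits into a sticky bit (round-to-odd), leaving at most 53 significant bits so the conversion to double is exact and the final rounding to float is the only rounding. A C sketch of that reading (stof is a name invented here):

    float stof(long long x)
    {
      if (x < (1LL << 53) && x >= -(1LL << 53))
        return (float)(double)x;        /* conversion to double is exact */
      unsigned long long u = (unsigned long long)x;
      if (u & 0x7FF)                    /* any discarded bit set? */
        u |= 0x800;                     /* force sticky bit: round to odd */
      u &= ~0x7FFULL;                   /* clear the 11 low bits */
      return (float)(double)(long long)u;
    }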
diff --git a/runtime/arm/i64_udiv.S b/runtime/arm/i64_udiv.S
index 316b7647..91e4ec2a 100644
--- a/runtime/arm/i64_udiv.S
+++ b/runtime/arm/i64_udiv.S
@@ -38,11 +38,11 @@
@@@ Unsigned division
-FUNCTION(__i64_udiv)
+FUNCTION(__compcert_i64_udiv)
push {r4, r5, r6, r7, r8, lr}
- bl __i64_udivmod
+ bl __compcert_i64_udivmod
MOV Reg0LO, Reg2LO
MOV Reg0HI, Reg2HI
pop {r4, r5, r6, r7, r8, lr}
bx lr
-ENDFUNCTION(__i64_udiv)
+ENDFUNCTION(__compcert_i64_udiv)
diff --git a/runtime/arm/i64_udivmod.S b/runtime/arm/i64_udivmod.S
index 4ba99bc9..c9b11692 100644
--- a/runtime/arm/i64_udivmod.S
+++ b/runtime/arm/i64_udivmod.S
@@ -42,7 +42,7 @@
@ On exit: Q = (r4, r5) quotient R = (r0, r1) remainder
@ Locals: M = (r6, r7) mask TMP = r8 temporary
-FUNCTION(__i64_udivmod)
+FUNCTION(__compcert_i64_udivmod)
orrs r8, Reg1LO, Reg1HI @ is D == 0?
it eq
bxeq lr @ if so, return with unspecified results
@@ -76,4 +76,4 @@ FUNCTION(__i64_udivmod)
orrs r8, Reg3LO, Reg3HI @ repeat while (M != 0) ...
bne 2b
bx lr
-ENDFUNCTION(__i64_udivmod)
+ENDFUNCTION(__compcert_i64_udivmod)
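The ARM udivmod is classic restoring shift-and-subtract: align the divisor under the dividend, then peel off one quotient bit per step through a moving mask M. A C sketch of the idea (udivmod is a name invented here; like the runtime, the result for a zero divisor is left unspecified):

    unsigned long long udivmod(unsigned long long n, unsigned long long d,
                               unsigned long long *rem)
    {
      unsigned long long q = 0, m = 1;
      if (d == 0) { *rem = n; return 0; }  /* unspecified in the runtime */
      while (d < n && !(d >> 63)) {        /* align D below N's top bit */
        d <<= 1; m <<= 1;
      }
      while (m != 0) {                     /* one quotient bit per step */
        if (n >= d) { n -= d; q |= m; }
        d >>= 1; m >>= 1;
      }
      *rem = n;
      return q;
    }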
diff --git a/runtime/arm/i64_umod.S b/runtime/arm/i64_umod.S
index e59fd203..b6e56ab2 100644
--- a/runtime/arm/i64_umod.S
+++ b/runtime/arm/i64_umod.S
@@ -38,9 +38,9 @@
@@@ Unsigned remainder
-FUNCTION(__i64_umod)
+FUNCTION(__compcert_i64_umod)
push {r4, r5, r6, r7, r8, lr}
- bl __i64_udivmod @ remainder is already in r0,r1
+ bl __compcert_i64_udivmod @ remainder is already in r0,r1
pop {r4, r5, r6, r7, r8, lr}
bx lr
-ENDFUNCTION(__i64_umod)
+ENDFUNCTION(__compcert_i64_umod)
diff --git a/runtime/arm/i64_umulh.S b/runtime/arm/i64_umulh.S
index c14f0c6b..8a7bf1c8 100644
--- a/runtime/arm/i64_umulh.S
+++ b/runtime/arm/i64_umulh.S
@@ -40,7 +40,7 @@
@ X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
-FUNCTION(__i64_umulh)
+FUNCTION(__compcert_i64_umulh)
push {r4, r5, r6, r7}
@@@ r7:r6 accumulate bits 95-32 of the full product
umull r4, r6, Reg0LO, Reg1LO @ r6 = high half of XL.YL product
@@ -58,4 +58,4 @@ FUNCTION(__i64_umulh)
ADC Reg0HI, r6, r5
pop {r4, r5, r6, r7}
bx lr
-ENDFUNCTION(__i64_umulh)
+ENDFUNCTION(__compcert_i64_umulh)
diff --git a/runtime/arm/i64_utod.S b/runtime/arm/i64_utod.S
index 593f8543..af7bcc71 100644
--- a/runtime/arm/i64_utod.S
+++ b/runtime/arm/i64_utod.S
@@ -38,8 +38,8 @@
@@@ Conversion from unsigned 64-bit integer to double float
-FUNCTION(__i64_utod)
-__i64_utod:
+FUNCTION(__compcert_i64_utod)
+__compcert_i64_utod:
vmov s0, Reg0LO
vcvt.f64.u32 d0, s0 @ convert low half to double (unsigned)
vmov s2, Reg0HI
@@ -50,7 +50,7 @@ __i64_utod:
vmov Reg0LO, Reg0HI, d0 @ return result in register pair r0:r1
#endif
bx lr
-ENDFUNCTION(__i64_utod)
+ENDFUNCTION(__compcert_i64_utod)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
diff --git a/runtime/arm/i64_utof.S b/runtime/arm/i64_utof.S
index be0ecc6a..66b146a9 100644
--- a/runtime/arm/i64_utof.S
+++ b/runtime/arm/i64_utof.S
@@ -38,7 +38,7 @@
@@@ Conversion from unsigned 64-bit integer to single float
-FUNCTION(__i64_utof)
+FUNCTION(__compcert_i64_utof)
@ Check whether X < 2^53
lsrs r2, Reg0HI, #21 @ test if X >> 53 == 0
beq 1f
@@ -67,7 +67,7 @@ FUNCTION(__i64_utof)
vmov r0, s0
#endif
bx lr
-ENDFUNCTION(__i64_utof)
+ENDFUNCTION(__compcert_i64_utof)
.balign 8
.LC1: .quad 0x41f0000000000000 @ 2^32 in double precision
diff --git a/runtime/powerpc/i64_dtos.s b/runtime/powerpc/i64_dtos.s
index 9b1288f4..85c60b27 100644
--- a/runtime/powerpc/i64_dtos.s
+++ b/runtime/powerpc/i64_dtos.s
@@ -39,8 +39,8 @@
### Conversion from double float to signed long
.balign 16
- .globl __i64_dtos
-__i64_dtos:
+ .globl __compcert_i64_dtos
+__compcert_i64_dtos:
stfdu f1, -16(r1) # extract LO (r4) and HI (r3) halves of double
lwz r3, 0(r1)
lwz r4, 4(r1)
@@ -95,6 +95,6 @@ __i64_dtos:
5: lis r3, 0x8000 # result is MIN_SINT = 0x8000_0000
li r4, 0
blr
- .type __i64_dtos, @function
- .size __i64_dtos, .-__i64_dtos
+ .type __compcert_i64_dtos, @function
+ .size __compcert_i64_dtos, .-__compcert_i64_dtos
\ No newline at end of file
diff --git a/runtime/powerpc/i64_dtou.s b/runtime/powerpc/i64_dtou.s
index 78cd08b1..67a721d4 100644
--- a/runtime/powerpc/i64_dtou.s
+++ b/runtime/powerpc/i64_dtou.s
@@ -39,8 +39,8 @@
### Conversion from double float to unsigned long
.balign 16
- .globl __i64_dtou
-__i64_dtou:
+ .globl __compcert_i64_dtou
+__compcert_i64_dtou:
stfdu f1, -16(r1) # extract LO (r4) and HI (r3) halves of double
lwz r3, 0(r1)
lwz r4, 4(r1)
@@ -86,7 +86,7 @@ __i64_dtou:
2: li r3, -1 # result is MAX_UINT
li r4, -1
blr
- .type __i64_dtou, @function
- .size __i64_dtou, .-__i64_dtou
+ .type __compcert_i64_dtou, @function
+ .size __compcert_i64_dtou, .-__compcert_i64_dtou
\ No newline at end of file
diff --git a/runtime/powerpc/i64_sar.s b/runtime/powerpc/i64_sar.s
index 0fd410d4..c7da448f 100644
--- a/runtime/powerpc/i64_sar.s
+++ b/runtime/powerpc/i64_sar.s
@@ -39,8 +39,8 @@
# Shift right signed
.balign 16
- .globl __i64_sar
-__i64_sar:
+ .globl __compcert_i64_sar
+__compcert_i64_sar:
andi. r5, r5, 63 # take amount modulo 64
cmpwi r5, 32
bge 1f # amount < 32?
@@ -54,7 +54,7 @@ __i64_sar:
sraw r4, r3, r6 # RL = XH >>s (amount - 32)
srawi r3, r3, 31 # RL = sign extension of XH
blr
- .type __i64_sar, @function
- .size __i64_sar, .-__i64_sar
+ .type __compcert_i64_sar, @function
+ .size __compcert_i64_sar, .-__compcert_i64_sar
\ No newline at end of file
diff --git a/runtime/powerpc/i64_sdiv.s b/runtime/powerpc/i64_sdiv.s
index 411ad50c..9787ea3b 100644
--- a/runtime/powerpc/i64_sdiv.s
+++ b/runtime/powerpc/i64_sdiv.s
@@ -39,8 +39,8 @@
### Signed division
.balign 16
- .globl __i64_sdiv
-__i64_sdiv:
+ .globl __compcert_i64_sdiv
+__compcert_i64_sdiv:
mflr r0
stw r0, 4(r1) # save return address in caller's frame
xor r0, r3, r5 # compute sign of result (top bit)
@@ -55,7 +55,7 @@ __i64_sdiv:
xor r5, r5, r0
subfc r6, r0, r6
subfe r5, r0, r5
- bl __i64_udivmod # do unsigned division
+ bl __compcert_i64_udivmod # do unsigned division
lwz r0, 4(r1)
mtlr r0 # restore return address
mfctr r0
@@ -65,7 +65,7 @@ __i64_sdiv:
subfc r4, r0, r6
subfe r3, r0, r5
blr
- .type __i64_sdiv, @function
- .size __i64_sdiv, .-__i64_sdiv
+ .type __compcert_i64_sdiv, @function
+ .size __compcert_i64_sdiv, .-__compcert_i64_sdiv
\ No newline at end of file
diff --git a/runtime/powerpc/i64_shl.s b/runtime/powerpc/i64_shl.s
index d122068b..f6edb6c2 100644
--- a/runtime/powerpc/i64_shl.s
+++ b/runtime/powerpc/i64_shl.s
@@ -39,8 +39,8 @@
# Shift left
.balign 16
- .globl __i64_shl
-__i64_shl:
+ .globl __compcert_i64_shl
+__compcert_i64_shl:
# On PowerPC, shift instructions with amount mod 64 >= 32 return 0
# hi = (hi << amount) | (lo >> (32 - amount)) | (lo << (amount - 32))
# lo = lo << amount
@@ -59,6 +59,6 @@ __i64_shl:
or r3, r3, r0
slw r4, r4, r5
blr
- .type __i64_shl, @function
- .size __i64_shl, .-__i64_shl
+ .type __compcert_i64_shl, @function
+ .size __compcert_i64_shl, .-__compcert_i64_shl
\ No newline at end of file
diff --git a/runtime/powerpc/i64_shr.s b/runtime/powerpc/i64_shr.s
index fb7dc5cc..b634aafd 100644
--- a/runtime/powerpc/i64_shr.s
+++ b/runtime/powerpc/i64_shr.s
@@ -39,8 +39,8 @@
# Shift right unsigned
.balign 16
- .globl __i64_shr
-__i64_shr:
+ .globl __compcert_i64_shr
+__compcert_i64_shr:
# On PowerPC, shift instructions with amount mod 64 >= 32 return 0
# lo = (lo >> amount) | (hi << (32 - amount)) | (hi >> (amount - 32))
# hi = hi >> amount
@@ -59,7 +59,7 @@ __i64_shr:
or r4, r4, r0
srw r3, r3, r5
blr
- .type __i64_shr, @function
- .size __i64_shr, .-__i64_shr
+ .type __compcert_i64_shr, @function
+ .size __compcert_i64_shr, .-__compcert_i64_shr
\ No newline at end of file
diff --git a/runtime/powerpc/i64_smod.s b/runtime/powerpc/i64_smod.s
index df6bfd8e..6b4e1f89 100644
--- a/runtime/powerpc/i64_smod.s
+++ b/runtime/powerpc/i64_smod.s
@@ -39,8 +39,8 @@
## Signed remainder
.balign 16
- .globl __i64_smod
-__i64_smod:
+ .globl __compcert_i64_smod
+__compcert_i64_smod:
mflr r0
stw r0, 4(r1) # save return address in caller's frame
mtctr r3 # save sign of result in CTR (sign of N)
@@ -54,7 +54,7 @@ __i64_smod:
xor r5, r5, r0
subfc r6, r0, r6
subfe r5, r0, r5
- bl __i64_udivmod # do unsigned division
+ bl __compcert_i64_udivmod # do unsigned division
lwz r0, 4(r1)
mtlr r0 # restore return address
mfctr r0
@@ -64,7 +64,7 @@ __i64_smod:
subfc r4, r0, r4
subfe r3, r0, r3
blr
- .type __i64_smod, @function
- .size __i64_smod, .-__i64_smod
+ .type __compcert_i64_smod, @function
+ .size __compcert_i64_smod, .-__compcert_i64_smod
\ No newline at end of file
diff --git a/runtime/powerpc/i64_smulh.s b/runtime/powerpc/i64_smulh.s
index f01855f3..73393fce 100644
--- a/runtime/powerpc/i64_smulh.s
+++ b/runtime/powerpc/i64_smulh.s
@@ -44,8 +44,8 @@
# - subtract Y if X < 0
.balign 16
- .globl __i64_smulh
-__i64_smulh:
+ .globl __compcert_i64_smulh
+__compcert_i64_smulh:
# r7:r8:r9 accumulate bits 127:32 of the full unsigned product
mulhwu r9, r4, r6 # r9 = high half of XL.YL
mullw r0, r4, r5 # r0 = low half of XL.YH
@@ -75,6 +75,6 @@ __i64_smulh:
subfc r4, r6, r8 # subtract Y
subfe r3, r5, r7
blr
- .type __i64_smulh, @function
- .size __i64_smulh, .-__i64_smulh
+ .type __compcert_i64_smulh, @function
+ .size __compcert_i64_smulh, .-__compcert_i64_smulh
diff --git a/runtime/powerpc/i64_stod.s b/runtime/powerpc/i64_stod.s
index cca109ba..0c1ab720 100644
--- a/runtime/powerpc/i64_stod.s
+++ b/runtime/powerpc/i64_stod.s
@@ -37,8 +37,8 @@
### Conversion from signed long to double float
.balign 16
- .globl __i64_stod
-__i64_stod:
+ .globl __compcert_i64_stod
+__compcert_i64_stod:
addi r1, r1, -16
lis r5, 0x4330
li r6, 0
@@ -62,6 +62,6 @@ __i64_stod:
fadd f1, f1, f2 # add both to get result
addi r1, r1, 16
blr
- .type __i64_stod, @function
- .size __i64_stod, .-__i64_stod
+ .type __compcert_i64_stod, @function
+ .size __compcert_i64_stod, .-__compcert_i64_stod
diff --git a/runtime/powerpc/i64_stof.s b/runtime/powerpc/i64_stof.s
index 05b36a78..97fa6bb8 100644
--- a/runtime/powerpc/i64_stof.s
+++ b/runtime/powerpc/i64_stof.s
@@ -39,8 +39,8 @@
### Conversion from signed long to single float
.balign 16
- .globl __i64_stof
-__i64_stof:
+ .globl __compcert_i64_stof
+__compcert_i64_stof:
mflr r9
# Check whether -2^53 <= X < 2^53
srawi r5, r3, 31
@@ -59,10 +59,10 @@ __i64_stof:
or r4, r4, r0 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
-1: bl __i64_stod
+1: bl __compcert_i64_stod
mtlr r9
frsp f1, f1
blr
- .type __i64_stof, @function
- .size __i64_stof, .-__i64_stof
+ .type __compcert_i64_stof, @function
+ .size __compcert_i64_stof, .-__compcert_i64_stof
diff --git a/runtime/powerpc/i64_udiv.s b/runtime/powerpc/i64_udiv.s
index 9443d59b..e2da855a 100644
--- a/runtime/powerpc/i64_udiv.s
+++ b/runtime/powerpc/i64_udiv.s
@@ -39,16 +39,16 @@
### Unsigned division
.balign 16
- .globl __i64_udiv
-__i64_udiv:
+ .globl __compcert_i64_udiv
+__compcert_i64_udiv:
mflr r0
stw r0, 4(r1) # save return address in caller's frame
- bl __i64_udivmod # unsigned divide
+ bl __compcert_i64_udivmod # unsigned divide
lwz r0, 4(r1)
mtlr r0 # restore return address
mr r3, r5 # result = quotient
mr r4, r6
blr
- .type __i64_udiv, @function
- .size __i64_udiv, .-__i64_udiv
+ .type __compcert_i64_udiv, @function
+ .size __compcert_i64_udiv, .-__compcert_i64_udiv
diff --git a/runtime/powerpc/i64_udivmod.s b/runtime/powerpc/i64_udivmod.s
index 826d9896..e81c6cef 100644
--- a/runtime/powerpc/i64_udivmod.s
+++ b/runtime/powerpc/i64_udivmod.s
@@ -45,9 +45,9 @@
# Output: quotient Q in (r5,r6), remainder R in (r3,r4)
# Destroys: all integer caller-save registers
- .globl __i64_udivmod
+ .globl __compcert_i64_udivmod
.balign 16
-__i64_udivmod:
+__compcert_i64_udivmod:
cmplwi r5, 0 # DH == 0 ?
stwu r1, -32(r1)
mflr r0
@@ -73,7 +73,7 @@ __i64_udivmod:
srw r6, r6, r8
or r5, r6, r0
# Divide N' by D' to get an approximate quotient Q
- bl __i64_udiv6432 # r3 = quotient, r4 = remainder
+ bl __compcert_i64_udiv6432 # r3 = quotient, r4 = remainder
mr r6, r3 # low half of quotient Q
li r5, 0 # high half of quotient is 0
# Tentative quotient is either correct or one too high
@@ -112,7 +112,7 @@ __i64_udivmod:
mullw r0, r31, r6
subf r3, r0, r3 # NH is remainder of this division
mr r5, r6
- bl __i64_udiv6432 # divide NH : NL by DL
+ bl __compcert_i64_udiv6432 # divide NH : NL by DL
mr r5, r31 # high word of quotient
mr r6, r3 # low word of quotient
# r4 contains low word of remainder
@@ -133,8 +133,8 @@ __i64_udivmod:
addi r1, r1, 32
blr
- .type __i64_udivmod, @function
- .size __i64_udivmod, .-__i64_udivmod
+ .type __compcert_i64_udivmod, @function
+ .size __compcert_i64_udivmod, .-__compcert_i64_udivmod
# Auxiliary division function: 64 bit integer divided by 32 bit integer
# Not exported
@@ -144,7 +144,7 @@ __i64_udivmod:
# Assumes: high word of N is less than D
.balign 16
-__i64_udiv6432:
+__compcert_i64_udiv6432:
# Algorithm 9.3 from Hacker's Delight, section 9.4
# Initially: u1 in r3, u0 in r4, v in r5
# s = __builtin_clz(v);
@@ -230,5 +230,5 @@ __i64_udiv6432:
add r3, r0, r3
blr
- .type __i64_udiv6432, @function
- .size __i64_udiv6432,.-__i64_udiv6432
+ .type __compcert_i64_udiv6432, @function
+ .size __compcert_i64_udiv6432,.-__compcert_i64_udiv6432
diff --git a/runtime/powerpc/i64_umod.s b/runtime/powerpc/i64_umod.s
index a4f23c98..bf8d6121 100644
--- a/runtime/powerpc/i64_umod.s
+++ b/runtime/powerpc/i64_umod.s
@@ -39,9 +39,9 @@
### Unsigned modulus
.balign 16
- .globl __i64_umod
-__i64_umod:
- b __i64_udivmod
- .type __i64_umod, @function
- .size __i64_umod, .-__i64_umod
+ .globl __compcert_i64_umod
+__compcert_i64_umod:
+ b __compcert_i64_udivmod
+ .type __compcert_i64_umod, @function
+ .size __compcert_i64_umod, .-__compcert_i64_umod
diff --git a/runtime/powerpc/i64_umulh.s b/runtime/powerpc/i64_umulh.s
index 1c609466..53b72948 100644
--- a/runtime/powerpc/i64_umulh.s
+++ b/runtime/powerpc/i64_umulh.s
@@ -41,8 +41,8 @@
# X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
.balign 16
- .globl __i64_umulh
-__i64_umulh:
+ .globl __compcert_i64_umulh
+__compcert_i64_umulh:
# r7:r8:r9 accumulate bits 127:32 of the full product
mulhwu r9, r4, r6 # r9 = high half of XL.YL
mullw r0, r4, r5 # r0 = low half of XL.YH
@@ -60,6 +60,6 @@ __i64_umulh:
mulhwu r0, r3, r5 # r0 = high half of XH.YH
adde r3, r7, r0
blr
- .type __i64_umulh, @function
- .size __i64_umulh, .-__i64_umulh
+ .type __compcert_i64_umulh, @function
+ .size __compcert_i64_umulh, .-__compcert_i64_umulh
diff --git a/runtime/powerpc/i64_utod.s b/runtime/powerpc/i64_utod.s
index 01a27583..69de6fdb 100644
--- a/runtime/powerpc/i64_utod.s
+++ b/runtime/powerpc/i64_utod.s
@@ -39,8 +39,8 @@
### Conversion from unsigned long to double float
.balign 16
- .globl __i64_utod
-__i64_utod:
+ .globl __compcert_i64_utod
+__compcert_i64_utod:
addi r1, r1, -16
lis r5, 0x4330
li r6, 0
@@ -61,6 +61,6 @@ __i64_utod:
fadd f1, f1, f2 # add both to get result
addi r1, r1, 16
blr
- .type __i64_utod, @function
- .size __i64_utod, .-__i64_utod
+ .type __compcert_i64_utod, @function
+ .size __compcert_i64_utod, .-__compcert_i64_utod
diff --git a/runtime/powerpc/i64_utof.s b/runtime/powerpc/i64_utof.s
index 2617cbda..cdb2f867 100644
--- a/runtime/powerpc/i64_utof.s
+++ b/runtime/powerpc/i64_utof.s
@@ -39,8 +39,8 @@
### Conversion from unsigned long to single float
.balign 16
- .globl __i64_utof
-__i64_utof:
+ .globl __compcert_i64_utof
+__compcert_i64_utof:
mflr r9
# Check whether X < 2^53
andis. r0, r3, 0xFFE0 # test bits 53...63 of X
@@ -55,10 +55,10 @@ __i64_utof:
or r4, r4, r0 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
-1: bl __i64_utod
+1: bl __compcert_i64_utod
mtlr r9
frsp f1, f1
blr
- .type __i64_utof, @function
- .size __i64_utof, .-__i64_utof
+ .type __compcert_i64_utof, @function
+ .size __compcert_i64_utof, .-__compcert_i64_utof
diff --git a/runtime/powerpc64/i64_dtou.s b/runtime/powerpc64/i64_dtou.s
index 60d5c9bf..e58bcfaf 100644
--- a/runtime/powerpc64/i64_dtou.s
+++ b/runtime/powerpc64/i64_dtou.s
@@ -39,8 +39,8 @@
### Conversion from double float to unsigned long
.balign 16
- .globl __i64_dtou
-__i64_dtou:
+ .globl __compcert_i64_dtou
+__compcert_i64_dtou:
lis r0, 0x5f00 # 0x5f00_0000 = 2^63 in binary32 format
stwu r0, -16(r1)
lfs f2, 0(r1) # f2 = 2^63
@@ -60,7 +60,7 @@ __i64_dtou:
addis r3, r3, 0x8000 # shift result up by 2^63
addi r1, r1, 16
blr
- .type __i64_dtou, @function
- .size __i64_dtou, .-__i64_dtou
+ .type __compcert_i64_dtou, @function
+ .size __compcert_i64_dtou, .-__compcert_i64_dtou
diff --git a/runtime/powerpc64/i64_stof.s b/runtime/powerpc64/i64_stof.s
index 8830d594..779cbc18 100644
--- a/runtime/powerpc64/i64_stof.s
+++ b/runtime/powerpc64/i64_stof.s
@@ -39,8 +39,8 @@
### Conversion from signed long to single float
.balign 16
- .globl __i64_stof
-__i64_stof:
+ .globl __compcert_i64_stof
+__compcert_i64_stof:
rldimi r4, r3, 32, 0 # reassemble (r3,r4) as a 64-bit integer in r4
# Check whether -2^53 <= X < 2^53
sradi r5, r4, 53
@@ -63,6 +63,6 @@ __i64_stof:
frsp f1, f1
addi r1, r1, 16
blr
- .type __i64_stof, @function
- .size __i64_stof, .-__i64_stof
+ .type __compcert_i64_stof, @function
+ .size __compcert_i64_stof, .-__compcert_i64_stof
diff --git a/runtime/powerpc64/i64_utod.s b/runtime/powerpc64/i64_utod.s
index ddde91dd..491ee26b 100644
--- a/runtime/powerpc64/i64_utod.s
+++ b/runtime/powerpc64/i64_utod.s
@@ -39,8 +39,8 @@
### Conversion from unsigned long to double float
.balign 16
- .globl __i64_utod
-__i64_utod:
+ .globl __compcert_i64_utod
+__compcert_i64_utod:
rldicl r3, r3, 0, 32 # clear top 32 bits
rldicl r4, r4, 0, 32 # clear top 32 bits
lis r5, 0x4f80 # 0x4f80_0000 = 2^32 in binary32 format
@@ -55,8 +55,8 @@ __i64_utod:
fmadd f1, f1, f3, f2 # compute hi * 2^32 + lo
addi r1, r1, 32
blr
- .type __i64_utod, @function
- .size __i64_utod, .-__i64_utod
+ .type __compcert_i64_utod, @function
+ .size __compcert_i64_utod, .-__compcert_i64_utod
# Alternate implementation using round-to-odd:
# rldimi r4, r3, 32, 0 # reassemble (r3,r4) as a 64-bit integer in r4
diff --git a/runtime/powerpc64/i64_utof.s b/runtime/powerpc64/i64_utof.s
index 2617cbda..cdb2f867 100644
--- a/runtime/powerpc64/i64_utof.s
+++ b/runtime/powerpc64/i64_utof.s
@@ -39,8 +39,8 @@
### Conversion from unsigned long to single float
.balign 16
- .globl __i64_utof
-__i64_utof:
+ .globl __compcert_i64_utof
+__compcert_i64_utof:
mflr r9
# Check whether X < 2^53
andis. r0, r3, 0xFFE0 # test bits 53...63 of X
@@ -55,10 +55,10 @@ __i64_utof:
or r4, r4, r0 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
-1: bl __i64_utod
+1: bl __compcert_i64_utod
mtlr r9
frsp f1, f1
blr
- .type __i64_utof, @function
- .size __i64_utof, .-__i64_utof
+ .type __compcert_i64_utof, @function
+ .size __compcert_i64_utof, .-__compcert_i64_utof
diff --git a/runtime/test/test_int64.c b/runtime/test/test_int64.c
index 58a129b6..0151e22f 100644
--- a/runtime/test/test_int64.c
+++ b/runtime/test/test_int64.c
@@ -41,21 +41,21 @@
typedef unsigned long long u64;
typedef signed long long s64;
-extern u64 __i64_udiv(u64 x, u64 y);
-extern u64 __i64_umod(u64 x, u64 y);
-extern s64 __i64_sdiv(s64 x, s64 y);
-extern s64 __i64_smod(s64 x, s64 y);
-
-extern u64 __i64_shl(u64 x, unsigned amount);
-extern u64 __i64_shr(u64 x, unsigned amount);
-extern s64 __i64_sar(s64 x, unsigned amount);
-
-extern double __i64_utod(u64 x);
-extern double __i64_stod(s64 x);
-extern float __i64_utof(u64 x);
-extern float __i64_stof(s64 x);
-extern u64 __i64_dtou(double d);
-extern s64 __i64_dtos(double d);
+extern u64 __compcert_i64_udiv(u64 x, u64 y);
+extern u64 __compcert_i64_umod(u64 x, u64 y);
+extern s64 __compcert_i64_sdiv(s64 x, s64 y);
+extern s64 __compcert_i64_smod(s64 x, s64 y);
+
+extern u64 __compcert_i64_shl(u64 x, unsigned amount);
+extern u64 __compcert_i64_shr(u64 x, unsigned amount);
+extern s64 __compcert_i64_sar(s64 x, unsigned amount);
+
+extern double __compcert_i64_utod(u64 x);
+extern double __compcert_i64_stod(s64 x);
+extern float __compcert_i64_utof(u64 x);
+extern float __compcert_i64_stof(s64 x);
+extern u64 __compcert_i64_dtou(double d);
+extern s64 __compcert_i64_dtos(double d);
static u64 rnd64(void)
{
@@ -76,11 +76,11 @@ static void test1(u64 x, u64 y)
if (y != 0) {
- z = __i64_udiv(x, y);
+ z = __compcert_i64_udiv(x, y);
if (z != x / y)
error++, printf("%llu /u %llu = %llu, expected %llu\n", x, y, z, x / y);
- z = __i64_umod(x, y);
+ z = __compcert_i64_umod(x, y);
if (z != x % y)
error++, printf("%llu %%u %llu = %llu, expected %llu\n", x, y, z, x % y);
@@ -88,11 +88,11 @@ static void test1(u64 x, u64 y)
if (y != 0 && !(x == 0x800000000000LLU && y == -1)) {
- t = __i64_sdiv(x, y);
+ t = __compcert_i64_sdiv(x, y);
if (t != (s64) x / (s64) y)
error++, printf("%lld /s %lld = %lld, expected %lld\n", x, y, t, (s64) x / (s64) y);
- t = __i64_smod(x, y);
+ t = __compcert_i64_smod(x, y);
if (t != (s64) x % (s64) y)
error++, printf("%lld %%s %lld = %lld, expected %lld\n", x, y, t, (s64) x % (s64) y);
@@ -104,11 +104,11 @@ static void test1(u64 x, u64 y)
if (uy != 0) {
- z = __i64_udiv(x, uy);
+ z = __compcert_i64_udiv(x, uy);
if (z != x / uy)
error++, printf("%llu /u %llu = %llu, expected %llu\n", x, uy, z, x / uy);
- z = __i64_umod(x, uy);
+ z = __compcert_i64_umod(x, uy);
if (z != x % uy)
error++, printf("%llu %%u %llu = %llu, expected %llu\n", x, uy, z, x % uy);
@@ -116,11 +116,11 @@ static void test1(u64 x, u64 y)
if (sy != 0 && !(x == 0x800000000000LLU && sy == -1)) {
- t = __i64_sdiv(x, sy);
+ t = __compcert_i64_sdiv(x, sy);
if (t != (s64) x / sy)
error++, printf("%lld /s %lld = %lld, expected %lld\n", x, sy, t, (s64) x / sy);
- t = __i64_smod(x, sy);
+ t = __compcert_i64_smod(x, sy);
if (t != (s64) x % sy)
error++, printf("%lld %%s %lld = %lld, expected %lld\n", x, sy, t, (s64) x % sy);
@@ -128,59 +128,59 @@ static void test1(u64 x, u64 y)
i = y & 63;
- z = __i64_shl(x, i);
+ z = __compcert_i64_shl(x, i);
if (z != x << i)
error++, printf("%016llx << %d = %016llx, expected %016llx\n", x, i, z, x << i);
- z = __i64_shr(x, i);
+ z = __compcert_i64_shr(x, i);
if (z != x >> i)
error++, printf("%016llx >>u %d = %016llx, expected %016llx\n", x, i, z, x >> i);
- t = __i64_sar(x, i);
+ t = __compcert_i64_sar(x, i);
if (t != (s64) x >> i)
error++, printf("%016llx >>s %d = %016llx, expected %016llx\n", x, i, t, (s64) x >> i);
- f = __i64_utod(x);
+ f = __compcert_i64_utod(x);
g = (double) x;
if (f != g)
error++, printf("(double) %llu (u) = %a, expected %a\n", x, f, g);
- f = __i64_stod(x);
+ f = __compcert_i64_stod(x);
g = (double) (s64) x;
if (f != g)
error++, printf("(double) %lld (s) = %a, expected %a\n", x, f, g);
- u = __i64_utof(x);
+ u = __compcert_i64_utof(x);
v = (float) x;
if (u != v)
error++, printf("(float) %llu (u) = %a, expected %a\n", x, u, v);
- u = __i64_stof(x);
+ u = __compcert_i64_stof(x);
v = (float) (s64) x;
if (u != v)
error++, printf("(float) %lld (s) = %a, expected %a\n", x, u, v);
f = (double) x;
if (f >= 0 && f < 0x1p+64) {
- z = __i64_dtou(f);
+ z = __compcert_i64_dtou(f);
if (z != (u64) f)
error++, printf("(u64) %a = %llu, expected %llu\n", f, z, (u64) f);
}
f = (double) (s64) x;
if (f >= -0x1p+63 && f < 0x1p+63) {
- t = __i64_dtos(f);
+ t = __compcert_i64_dtos(f);
if (t != (s64) f)
error++, printf("(s64) %a = %lld, expected %lld\n", f, z, (s64) f);
}
f = ((double) x) * 0.0001;
- z = __i64_dtou(f);
+ z = __compcert_i64_dtou(f);
if (z != (u64) f)
error++, printf("(u64) %a = %llu, expected %llu\n", f, z, (u64) f);
f = ((double) (s64) x) * 0.0001;
- t = __i64_dtos(f);
+ t = __compcert_i64_dtos(f);
if (t != (s64) f)
error++, printf("(s64) %a = %lld, expected %lld\n", f, z, (s64) f);
}
diff --git a/runtime/x86_32/i64_dtos.S b/runtime/x86_32/i64_dtos.S
index 3cc381bf..ccc0013c 100644
--- a/runtime/x86_32/i64_dtos.S
+++ b/runtime/x86_32/i64_dtos.S
@@ -38,7 +38,7 @@
// Conversion float -> signed long
-FUNCTION(__i64_dtos)
+FUNCTION(__compcert_i64_dtos)
subl $4, %esp
// Change rounding mode to "round towards zero"
fnstcw 0(%esp)
@@ -56,5 +56,5 @@ FUNCTION(__i64_dtos)
movl 12(%esp), %edx
addl $4, %esp
ret
-ENDFUNCTION(__i64_dtos)
+ENDFUNCTION(__compcert_i64_dtos)
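The fnstcw/fldcw dance above exists because x87 fistp rounds according to the FPU control word, while a C double-to-int64 cast must truncate. A portable C analogue of the same control-flow, sketched with <fenv.h> (dtos is a name invented here; the assembly manipulates the control word directly rather than calling these functions):

    #include <fenv.h>
    #include <math.h>

    long long dtos(double f)
    {
      int old = fegetround();
      fesetround(FE_TOWARDZERO);   /* like setting the RC bits via fldcw */
      long long r = llrint(f);     /* rounds per current mode: truncates */
      fesetround(old);             /* restore the caller's rounding mode */
      return r;
    }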
diff --git a/runtime/x86_32/i64_dtou.S b/runtime/x86_32/i64_dtou.S
index 4903f847..1115328d 100644
--- a/runtime/x86_32/i64_dtou.S
+++ b/runtime/x86_32/i64_dtou.S
@@ -38,7 +38,7 @@
// Conversion float -> unsigned long
-FUNCTION(__i64_dtou)
+FUNCTION(__compcert_i64_dtou)
subl $4, %esp
// Compare argument with 2^63
fldl 8(%esp)
@@ -84,5 +84,5 @@ FUNCTION(__i64_dtou)
.p2align 2
LC1: .long 0x5f000000 // 2^63 in single precision
-ENDFUNCTION(__i64_dtou)
+ENDFUNCTION(__compcert_i64_dtou)
\ No newline at end of file
diff --git a/runtime/x86_32/i64_sar.S b/runtime/x86_32/i64_sar.S
index cf2233b1..d62d0d69 100644
--- a/runtime/x86_32/i64_sar.S
+++ b/runtime/x86_32/i64_sar.S
@@ -40,7 +40,7 @@
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
-FUNCTION(__i64_sar)
+FUNCTION(__compcert_i64_sar)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl
jne 1f
@@ -56,5 +56,5 @@ FUNCTION(__i64_sar)
sarl %cl, %eax // eax = XH >> (amount - 32)
sarl $31, %edx // edx = sign of X
ret
-ENDFUNCTION(__i64_sar)
+ENDFUNCTION(__compcert_i64_sar)
diff --git a/runtime/x86_32/i64_sdiv.S b/runtime/x86_32/i64_sdiv.S
index f6551c7d..2da5706c 100644
--- a/runtime/x86_32/i64_sdiv.S
+++ b/runtime/x86_32/i64_sdiv.S
@@ -38,7 +38,7 @@
// Signed division
-FUNCTION(__i64_sdiv)
+FUNCTION(__compcert_i64_sdiv)
pushl %ebp
pushl %esi
pushl %edi
@@ -58,7 +58,7 @@ FUNCTION(__i64_sdiv)
adcl $0, %esi
negl %esi
movl %esi, 28(%esp)
-2: call GLOB(__i64_udivmod)
+2: call GLOB(__compcert_i64_udivmod)
testl %ebp, %ebp // apply sign to result
jge 3f
negl %esi
@@ -70,5 +70,5 @@ FUNCTION(__i64_sdiv)
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_sdiv)
+ENDFUNCTION(__compcert_i64_sdiv)
diff --git a/runtime/x86_32/i64_shl.S b/runtime/x86_32/i64_shl.S
index 1fabebce..78f32cd6 100644
--- a/runtime/x86_32/i64_shl.S
+++ b/runtime/x86_32/i64_shl.S
@@ -40,7 +40,7 @@
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
-FUNCTION(__i64_shl)
+FUNCTION(__compcert_i64_shl)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl
jne 1f
@@ -55,5 +55,5 @@ FUNCTION(__i64_shl)
shll %cl, %edx // edx = XL << (amount - 32)
xorl %eax, %eax // eax = 0
ret
-ENDFUNCTION(__i64_shl)
+ENDFUNCTION(__compcert_i64_shl)
diff --git a/runtime/x86_32/i64_shr.S b/runtime/x86_32/i64_shr.S
index 34196f09..36d970fc 100644
--- a/runtime/x86_32/i64_shr.S
+++ b/runtime/x86_32/i64_shr.S
@@ -40,7 +40,7 @@
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
-FUNCTION(__i64_shr)
+FUNCTION(__compcert_i64_shr)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl
jne 1f
@@ -55,5 +55,5 @@ FUNCTION(__i64_shr)
shrl %cl, %eax // eax = XH >> (amount - 32)
xorl %edx, %edx // edx = 0
ret
-ENDFUNCTION(__i64_shr)
+ENDFUNCTION(__compcert_i64_shr)
diff --git a/runtime/x86_32/i64_smod.S b/runtime/x86_32/i64_smod.S
index 28f47ad4..f2069d69 100644
--- a/runtime/x86_32/i64_smod.S
+++ b/runtime/x86_32/i64_smod.S
@@ -38,7 +38,7 @@
// Signed remainder
-FUNCTION(__i64_smod)
+FUNCTION(__compcert_i64_smod)
pushl %ebp
pushl %esi
pushl %edi
@@ -57,7 +57,7 @@ FUNCTION(__i64_smod)
adcl $0, %esi
negl %esi
movl %esi, 28(%esp)
-2: call GLOB(__i64_udivmod)
+2: call GLOB(__compcert_i64_udivmod)
testl %ebp, %ebp // apply sign to result
jge 3f
negl %eax
@@ -67,4 +67,4 @@ FUNCTION(__i64_smod)
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_smod)
+ENDFUNCTION(__compcert_i64_smod)
diff --git a/runtime/x86_32/i64_smulh.S b/runtime/x86_32/i64_smulh.S
index cc0f0167..618f40ba 100644
--- a/runtime/x86_32/i64_smulh.S
+++ b/runtime/x86_32/i64_smulh.S
@@ -48,7 +48,7 @@
// - subtract X if Y < 0
// - subtract Y if X < 0
-FUNCTION(__i64_smulh)
+FUNCTION(__compcert_i64_smulh)
pushl %esi
pushl %edi
movl XL, %eax
@@ -91,4 +91,4 @@ FUNCTION(__i64_smulh)
popl %edi
popl %esi
ret
-ENDFUNCTION(__i64_smulh)
+ENDFUNCTION(__compcert_i64_smulh)
diff --git a/runtime/x86_32/i64_stod.S b/runtime/x86_32/i64_stod.S
index d020e2fc..8faf480f 100644
--- a/runtime/x86_32/i64_stod.S
+++ b/runtime/x86_32/i64_stod.S
@@ -38,12 +38,12 @@
// Conversion signed long -> double-precision float
-FUNCTION(__i64_stod)
+FUNCTION(__compcert_i64_stod)
fildll 4(%esp)
ret
// The result is in extended precision (80 bits) and therefore
// exact (64 bits of mantissa). It will be rounded to double
// precision by the caller, when transferring the result
// to an XMM register or a 64-bit stack slot.
-ENDFUNCTION(__i64_stod)
+ENDFUNCTION(__compcert_i64_stod)
diff --git a/runtime/x86_32/i64_stof.S b/runtime/x86_32/i64_stof.S
index 25b1d4f7..4b5817ac 100644
--- a/runtime/x86_32/i64_stof.S
+++ b/runtime/x86_32/i64_stof.S
@@ -38,12 +38,12 @@
// Conversion signed long -> single-precision float
-FUNCTION(__i64_stof)
+FUNCTION(__compcert_i64_stof)
fildll 4(%esp)
// The TOS is in extended precision and therefore exact.
// Force rounding to single precision
fstps 4(%esp)
flds 4(%esp)
ret
-ENDFUNCTION(__i64_stof)
+ENDFUNCTION(__compcert_i64_stof)
diff --git a/runtime/x86_32/i64_udiv.S b/runtime/x86_32/i64_udiv.S
index 75305433..c9ae64f6 100644
--- a/runtime/x86_32/i64_udiv.S
+++ b/runtime/x86_32/i64_udiv.S
@@ -38,15 +38,15 @@
// Unsigned division
-FUNCTION(__i64_udiv)
+FUNCTION(__compcert_i64_udiv)
pushl %ebp
pushl %esi
pushl %edi
- call GLOB(__i64_udivmod)
+ call GLOB(__compcert_i64_udivmod)
movl %esi, %eax
movl %edi, %edx
popl %edi
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_udiv)
+ENDFUNCTION(__compcert_i64_udiv)
diff --git a/runtime/x86_32/i64_udivmod.S b/runtime/x86_32/i64_udivmod.S
index dccfc286..a5d42fa5 100644
--- a/runtime/x86_32/i64_udivmod.S
+++ b/runtime/x86_32/i64_udivmod.S
@@ -45,7 +45,7 @@
// eax:edx is remainder R
// ebp is preserved
-FUNCTION(__i64_udivmod)
+FUNCTION(__compcert_i64_udivmod)
cmpl $0, 32(%esp) // single-word divisor? (DH = 0)
jne 1f
// Special case 64 bits divided by 32 bits
@@ -101,4 +101,4 @@ FUNCTION(__i64_udivmod)
5: decl %esi // adjust Q down by 1
jmp 3b // and redo check & computation of remainder
-ENDFUNCTION(__i64_udivmod)
+ENDFUNCTION(__compcert_i64_udivmod)
diff --git a/runtime/x86_32/i64_umod.S b/runtime/x86_32/i64_umod.S
index a019df28..241a687b 100644
--- a/runtime/x86_32/i64_umod.S
+++ b/runtime/x86_32/i64_umod.S
@@ -38,14 +38,14 @@
// Unsigned remainder
-FUNCTION(__i64_umod)
+FUNCTION(__compcert_i64_umod)
pushl %ebp
pushl %esi
pushl %edi
- call GLOB(__i64_udivmod)
+ call GLOB(__compcert_i64_udivmod)
popl %edi
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_umod)
+ENDFUNCTION(__compcert_i64_umod)
diff --git a/runtime/x86_32/i64_umulh.S b/runtime/x86_32/i64_umulh.S
index 449a0f8b..2dba0975 100644
--- a/runtime/x86_32/i64_umulh.S
+++ b/runtime/x86_32/i64_umulh.S
@@ -45,7 +45,7 @@
// X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
-FUNCTION(__i64_umulh)
+FUNCTION(__compcert_i64_umulh)
pushl %esi
pushl %edi
movl XL, %eax
@@ -70,5 +70,5 @@ FUNCTION(__i64_umulh)
popl %edi
popl %esi
ret
-ENDFUNCTION(__i64_umulh)
+ENDFUNCTION(__compcert_i64_umulh)
diff --git a/runtime/x86_32/i64_utod.S b/runtime/x86_32/i64_utod.S
index 428a3b94..d7ec582f 100644
--- a/runtime/x86_32/i64_utod.S
+++ b/runtime/x86_32/i64_utod.S
@@ -38,7 +38,7 @@
// Conversion unsigned long -> double-precision float
-FUNCTION(__i64_utod)
+FUNCTION(__compcert_i64_utod)
fildll 4(%esp) // convert as if signed
cmpl $0, 8(%esp) // is argument >= 2^63?
jns 1f
@@ -52,4 +52,4 @@ FUNCTION(__i64_utod)
.p2align 2
LC1: .long 0x5f800000 // 2^64 in single precision
-ENDFUNCTION(__i64_utod)
+ENDFUNCTION(__compcert_i64_utod)
diff --git a/runtime/x86_32/i64_utof.S b/runtime/x86_32/i64_utof.S
index 0b58f48b..858caa37 100644
--- a/runtime/x86_32/i64_utof.S
+++ b/runtime/x86_32/i64_utof.S
@@ -38,7 +38,7 @@
// Conversion unsigned long -> single-precision float
-FUNCTION(__i64_utof)
+FUNCTION(__compcert_i64_utof)
fildll 4(%esp) // convert as if signed
cmpl $0, 8(%esp) // is argument >= 2^63?
jns 1f
@@ -52,4 +52,4 @@ FUNCTION(__i64_utof)
.p2align 2
LC1: .long 0x5f800000 // 2^64 in single precision
-ENDFUNCTION(__i64_utof)
+ENDFUNCTION(__compcert_i64_utof)
diff --git a/runtime/x86_64/i64_dtou.S b/runtime/x86_64/i64_dtou.S
index e455ea6f..cc822d67 100644
--- a/runtime/x86_64/i64_dtou.S
+++ b/runtime/x86_64/i64_dtou.S
@@ -38,7 +38,7 @@
// Conversion float -> unsigned long
-FUNCTION(__i64_dtou)
+FUNCTION(__compcert_i64_dtou)
ucomisd .LC1(%rip), %xmm0
jnb 1f
cvttsd2siq %xmm0, %rax
@@ -52,5 +52,5 @@ FUNCTION(__i64_dtou)
.LC1: .quad 0x43e0000000000000 // 2^63 in double precision
.LC2: .quad 0x8000000000000000 // 2^63 as an integer
-ENDFUNCTION(__i64_dtou)
+ENDFUNCTION(__compcert_i64_dtou)
diff --git a/runtime/x86_64/i64_utod.S b/runtime/x86_64/i64_utod.S
index 96b77a64..62e6e484 100644
--- a/runtime/x86_64/i64_utod.S
+++ b/runtime/x86_64/i64_utod.S
@@ -38,7 +38,7 @@
// Conversion unsigned long -> double-precision float
-FUNCTION(__i64_utod)
+FUNCTION(__compcert_i64_utod)
testq %rdi, %rdi
js 1f
pxor %xmm0, %xmm0 // if < 2^63,
@@ -53,4 +53,4 @@ FUNCTION(__i64_utod)
cvtsi2sdq %rax, %xmm0 // convert as if signed
addsd %xmm0, %xmm0 // multiply result by 2.0
ret
-ENDFUNCTION(__i64_utod)
+ENDFUNCTION(__compcert_i64_utod)
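SSE2 offers only a signed 64-bit convert (cvtsi2sdq), so for arguments at or above 2^63 the routine halves the value with the dropped bit folded back in (round-to-odd) and doubles the converted result; the doubling is exact, so the convert remains the only rounding. A C sketch (utod is a name invented here):

    double utod(unsigned long long x)
    {
      if (x < 0x8000000000000000ULL)
        return (double)(long long)x;        /* fits the signed convert */
      /* halve with the lost bit kept sticky (round to odd), then double */
      unsigned long long h = (x >> 1) | (x & 1);
      return 2.0 * (double)(long long)h;
    }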
diff --git a/runtime/x86_64/i64_utof.S b/runtime/x86_64/i64_utof.S
index d0935341..63a33920 100644
--- a/runtime/x86_64/i64_utof.S
+++ b/runtime/x86_64/i64_utof.S
@@ -38,7 +38,7 @@
// Conversion unsigned long -> single-precision float
-FUNCTION(__i64_utof)
+FUNCTION(__compcert_i64_utof)
testq %rdi, %rdi
js 1f
pxor %xmm0, %xmm0 // if < 2^63,
@@ -53,4 +53,4 @@ FUNCTION(__i64_utof)
cvtsi2ssq %rax, %xmm0 // convert as if signed
addss %xmm0, %xmm0 // multiply result by 2.0
ret
-ENDFUNCTION(__i64_utof)
+ENDFUNCTION(__compcert_i64_utof)