Author:    Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
Committer: Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
Commit:    4affb2b02e486681b39add0dbaf4f873a91885c8 (patch)
Tree:      06b045ee98934b00c341071758c9b905ecad057f /runtime/x86_32
Parent:    95ed4ea7df3e4b05d623afb9cb65f0eb2653361b (diff)
Prefixed runtime functions.
The runtime functions are prefixed with __compcert_ in order to avoid potential name clashes with the runtime/builtin functions of other compilers. Bug 22062
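
To make the rename concrete, a hedged sketch (mine, not part of the commit): on x86_32, CompCert lowers 64-bit operations the hardware lacks into calls into this runtime, so compiled code now references the prefixed symbols.

// Illustrative C source; the helper call below is emitted by the compiler.
long long quot(long long a, long long b) {
    return a / b;  // before: call __i64_sdiv   after: call __compcert_i64_sdiv
}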
Diffstat (limited to 'runtime/x86_32')
-rw-r--r--  runtime/x86_32/i64_dtos.S     |  4
-rw-r--r--  runtime/x86_32/i64_dtou.S     |  4
-rw-r--r--  runtime/x86_32/i64_sar.S      |  4
-rw-r--r--  runtime/x86_32/i64_sdiv.S     |  6
-rw-r--r--  runtime/x86_32/i64_shl.S      |  4
-rw-r--r--  runtime/x86_32/i64_shr.S      |  4
-rw-r--r--  runtime/x86_32/i64_smod.S     |  6
-rw-r--r--  runtime/x86_32/i64_smulh.S    |  4
-rw-r--r--  runtime/x86_32/i64_stod.S     |  4
-rw-r--r--  runtime/x86_32/i64_stof.S     |  4
-rw-r--r--  runtime/x86_32/i64_udiv.S     |  6
-rw-r--r--  runtime/x86_32/i64_udivmod.S  |  4
-rw-r--r--  runtime/x86_32/i64_umod.S     |  6
-rw-r--r--  runtime/x86_32/i64_umulh.S    |  4
-rw-r--r--  runtime/x86_32/i64_utod.S     |  4
-rw-r--r--  runtime/x86_32/i64_utof.S     |  4
16 files changed, 36 insertions(+), 36 deletions(-)
diff --git a/runtime/x86_32/i64_dtos.S b/runtime/x86_32/i64_dtos.S
index 3cc381bf..ccc0013c 100644
--- a/runtime/x86_32/i64_dtos.S
+++ b/runtime/x86_32/i64_dtos.S
@@ -38,7 +38,7 @@
// Conversion float -> signed long
-FUNCTION(__i64_dtos)
+FUNCTION(__compcert_i64_dtos)
subl $4, %esp
// Change rounding mode to "round towards zero"
fnstcw 0(%esp)
@@ -56,5 +56,5 @@ FUNCTION(__i64_dtos)
movl 12(%esp), %edx
addl $4, %esp
ret
-ENDFUNCTION(__i64_dtos)
+ENDFUNCTION(__compcert_i64_dtos)
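
The body of this helper saves the x87 control word, switches the rounding field to round-towards-zero, converts with fistpll, and restores the control word. A hedged C99 analogue of that dance (function name is mine, not the runtime's):

#include <fenv.h>
#include <math.h>
#include <stdint.h>

// Model of __compcert_i64_dtos. C casts already truncate; the assembly
// must force truncation by hand because fistpll obeys the x87 rounding
// mode. Strictly, FENV_ACCESS should be on for this to be portable.
int64_t i64_dtos_model(double x) {
    int save = fegetround();
    fesetround(FE_TOWARDZERO);      // rewrite the control word
    int64_t r = (int64_t)llrint(x); // like fistpll: rounds per current mode
    fesetround(save);               // restore the saved control word
    return r;
}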
diff --git a/runtime/x86_32/i64_dtou.S b/runtime/x86_32/i64_dtou.S
index 4903f847..1115328d 100644
--- a/runtime/x86_32/i64_dtou.S
+++ b/runtime/x86_32/i64_dtou.S
@@ -38,7 +38,7 @@
// Conversion float -> unsigned long
-FUNCTION(__i64_dtou)
+FUNCTION(__compcert_i64_dtou)
subl $4, %esp
// Compare argument with 2^63
fldl 8(%esp)
@@ -84,5 +84,5 @@ FUNCTION(__i64_dtou)
.p2align 2
LC1: .long 0x5f000000 // 2^63 in single precision
-ENDFUNCTION(__i64_dtou)
+ENDFUNCTION(__compcert_i64_dtou)
\ No newline at end of file
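
The constant LC1 (2^63 in single precision) drives the case split in this helper: arguments below 2^63 go through the signed conversion directly, larger ones are reduced by 2^63 first and the top bit is added back. A hedged C model of that control flow (function name is mine):

#include <stdint.h>

// Model of __compcert_i64_dtou: reduce into signed range, convert,
// then restore the top bit.
uint64_t i64_dtou_model(double x) {
    const double two63 = 9223372036854775808.0;   // 2^63, as LC1
    if (x < two63)
        return (uint64_t)(int64_t)x;              // fits the signed path
    return (uint64_t)(int64_t)(x - two63) + (1ULL << 63);
}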
diff --git a/runtime/x86_32/i64_sar.S b/runtime/x86_32/i64_sar.S
index cf2233b1..d62d0d69 100644
--- a/runtime/x86_32/i64_sar.S
+++ b/runtime/x86_32/i64_sar.S
@@ -40,7 +40,7 @@
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
-FUNCTION(__i64_sar)
+FUNCTION(__compcert_i64_sar)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl
jne 1f
@@ -56,5 +56,5 @@ FUNCTION(__i64_sar)
sarl %cl, %eax // eax = XH >> (amount - 32)
sarl $31, %edx // edx = sign of X
ret
-ENDFUNCTION(__i64_sar)
+ENDFUNCTION(__compcert_i64_sar)
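
A hedged C model of the two paths above (names are mine): a 64-bit arithmetic shift assembled from 32-bit halves, with the amount taken mod 64 as the note describes. It assumes >> on signed integers is an arithmetic shift, as on mainstream compilers.

#include <stdint.h>

int64_t i64_sar_model(int64_t x, unsigned amount) {
    int32_t  xh = (int32_t)(x >> 32);
    uint32_t xl = (uint32_t)x;
    unsigned n  = amount & 63;
    uint32_t lo;
    int32_t  hi;
    if (n < 32) {                        // first path (shrd-style)
        lo = n ? (xl >> n) | ((uint32_t)xh << (32 - n)) : xl;
        hi = xh >> n;
    } else {                             // second path: amount in [32,63]
        lo = (uint32_t)(xh >> (n - 32)); // eax = XH >> (amount - 32)
        hi = xh >> 31;                   // edx = sign of X
    }
    return (int64_t)(((uint64_t)(uint32_t)hi << 32) | lo);
}

The i64_shl and i64_shr helpers below use the same two-path structure, with logical shifts and zero fill in place of the sign fill.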
diff --git a/runtime/x86_32/i64_sdiv.S b/runtime/x86_32/i64_sdiv.S
index f6551c7d..2da5706c 100644
--- a/runtime/x86_32/i64_sdiv.S
+++ b/runtime/x86_32/i64_sdiv.S
@@ -38,7 +38,7 @@
// Signed division
-FUNCTION(__i64_sdiv)
+FUNCTION(__compcert_i64_sdiv)
pushl %ebp
pushl %esi
pushl %edi
@@ -58,7 +58,7 @@ FUNCTION(__i64_sdiv)
adcl $0, %esi
negl %esi
movl %esi, 28(%esp)
-2: call GLOB(__i64_udivmod)
+2: call GLOB(__compcert_i64_udivmod)
testl %ebp, %ebp // apply sign to result
jge 3f
negl %esi
@@ -70,5 +70,5 @@ FUNCTION(__i64_sdiv)
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_sdiv)
+ENDFUNCTION(__compcert_i64_sdiv)
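
A hedged C model of this helper (function name is mine): negate the operands into unsigned range, divide via the shared unsigned routine, then negate the quotient when the signs differ, mirroring the "apply sign to result" fixup above.

#include <stdint.h>

int64_t i64_sdiv_model(int64_t n, int64_t d) {
    uint64_t un = n < 0 ? 0 - (uint64_t)n : (uint64_t)n;  // safe for INT64_MIN
    uint64_t ud = d < 0 ? 0 - (uint64_t)d : (uint64_t)d;
    uint64_t q  = un / ud;          // stands in for __compcert_i64_udivmod
    return (int64_t)(((n < 0) != (d < 0)) ? 0 - q : q);
}

i64_smod below applies the same fixup to the remainder, which takes the sign of the dividend; that is why both route through the shared __compcert_i64_udivmod.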
diff --git a/runtime/x86_32/i64_shl.S b/runtime/x86_32/i64_shl.S
index 1fabebce..78f32cd6 100644
--- a/runtime/x86_32/i64_shl.S
+++ b/runtime/x86_32/i64_shl.S
@@ -40,7 +40,7 @@
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
-FUNCTION(__i64_shl)
+FUNCTION(__compcert_i64_shl)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl
jne 1f
@@ -55,5 +55,5 @@ FUNCTION(__i64_shl)
shll %cl, %edx // edx = XL << (amount - 32)
xorl %eax, %eax // eax = 0
ret
-ENDFUNCTION(__i64_shl)
+ENDFUNCTION(__compcert_i64_shl)
diff --git a/runtime/x86_32/i64_shr.S b/runtime/x86_32/i64_shr.S
index 34196f09..36d970fc 100644
--- a/runtime/x86_32/i64_shr.S
+++ b/runtime/x86_32/i64_shr.S
@@ -40,7 +40,7 @@
// Note: IA32 shift instructions treat their amount (in %cl) modulo 32
-FUNCTION(__i64_shr)
+FUNCTION(__compcert_i64_shr)
movl 12(%esp), %ecx // ecx = shift amount, treated mod 64
testb $32, %cl
jne 1f
@@ -55,5 +55,5 @@ FUNCTION(__i64_shr)
shrl %cl, %eax // eax = XH >> (amount - 32)
xorl %edx, %edx // edx = 0
ret
-ENDFUNCTION(__i64_shr)
+ENDFUNCTION(__compcert_i64_shr)
diff --git a/runtime/x86_32/i64_smod.S b/runtime/x86_32/i64_smod.S
index 28f47ad4..f2069d69 100644
--- a/runtime/x86_32/i64_smod.S
+++ b/runtime/x86_32/i64_smod.S
@@ -38,7 +38,7 @@
// Signed remainder
-FUNCTION(__i64_smod)
+FUNCTION(__compcert_i64_smod)
pushl %ebp
pushl %esi
pushl %edi
@@ -57,7 +57,7 @@ FUNCTION(__i64_smod)
adcl $0, %esi
negl %esi
movl %esi, 28(%esp)
-2: call GLOB(__i64_udivmod)
+2: call GLOB(__compcert_i64_udivmod)
testl %ebp, %ebp // apply sign to result
jge 3f
negl %eax
@@ -67,4 +67,4 @@ FUNCTION(__i64_smod)
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_smod)
+ENDFUNCTION(__compcert_i64_smod)
diff --git a/runtime/x86_32/i64_smulh.S b/runtime/x86_32/i64_smulh.S
index cc0f0167..618f40ba 100644
--- a/runtime/x86_32/i64_smulh.S
+++ b/runtime/x86_32/i64_smulh.S
@@ -48,7 +48,7 @@
// - subtract X if Y < 0
// - subtract Y if X < 0
-FUNCTION(__i64_smulh)
+FUNCTION(__compcert_i64_smulh)
pushl %esi
pushl %edi
movl XL, %eax
@@ -91,4 +91,4 @@ FUNCTION(__i64_smulh)
popl %edi
popl %esi
ret
-ENDFUNCTION(__i64_smulh)
+ENDFUNCTION(__compcert_i64_smulh)
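
A hedged C model of the algorithm the comments above describe (function name is mine): the high 64 bits of the signed 64x64->128 product, built from four 32x32 partial products for the unsigned high word, then corrected by subtracting Y if X < 0 and X if Y < 0.

#include <stdint.h>

int64_t i64_smulh_model(int64_t x, int64_t y) {
    uint64_t xl = (uint32_t)x, xh = (uint64_t)x >> 32;
    uint64_t yl = (uint32_t)y, yh = (uint64_t)y >> 32;
    uint64_t ll = xl * yl, lh = xl * yh, hl = xh * yl, hh = xh * yh;
    uint64_t mid  = (ll >> 32) + (uint32_t)lh + (uint32_t)hl;  // carries from low half
    uint64_t high = hh + (lh >> 32) + (hl >> 32) + (mid >> 32);
    if (x < 0) high -= (uint64_t)y;   // sign corrections, mod 2^64
    if (y < 0) high -= (uint64_t)x;
    return (int64_t)high;             // implementation-defined cast; fine in practice
}

i64_umulh below is the same partial-product scheme without the two corrections.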
diff --git a/runtime/x86_32/i64_stod.S b/runtime/x86_32/i64_stod.S
index d020e2fc..8faf480f 100644
--- a/runtime/x86_32/i64_stod.S
+++ b/runtime/x86_32/i64_stod.S
@@ -38,12 +38,12 @@
// Conversion signed long -> double-precision float
-FUNCTION(__i64_stod)
+FUNCTION(__compcert_i64_stod)
fildll 4(%esp)
ret
// The result is in extended precision (80 bits) and therefore
// exact (64 bits of mantissa). It will be rounded to double
// precision by the caller, when transferring the result
// to an XMM register or a 64-bit stack slot.
-ENDFUNCTION(__i64_stod)
+ENDFUNCTION(__compcert_i64_stod)
diff --git a/runtime/x86_32/i64_stof.S b/runtime/x86_32/i64_stof.S
index 25b1d4f7..4b5817ac 100644
--- a/runtime/x86_32/i64_stof.S
+++ b/runtime/x86_32/i64_stof.S
@@ -38,12 +38,12 @@
// Conversion signed long -> single-precision float
-FUNCTION(__i64_stof)
+FUNCTION(__compcert_i64_stof)
fildll 4(%esp)
// The TOS is in extended precision and therefore exact.
// Force rounding to single precision
fstps 4(%esp)
flds 4(%esp)
ret
-ENDFUNCTION(__i64_stof)
+ENDFUNCTION(__compcert_i64_stof)
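
The store-and-reload forces fstps to round the exact extended-precision integer to single precision in one step. A hedged standalone illustration (example value is mine, not from the source) of the double-rounding hazard that rounding through double first would introduce:

#include <stdint.h>
#include <stdio.h>

int main(void) {
    int64_t x = (1LL << 53) + (1LL << 29) + 1;
    float once  = (float)x;           // one rounding:  2^53 + 2^30
    float twice = (float)(double)x;   // two roundings: 2^53
    printf("%.1f vs %.1f\n", once, twice);
    return 0;
}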
diff --git a/runtime/x86_32/i64_udiv.S b/runtime/x86_32/i64_udiv.S
index 75305433..c9ae64f6 100644
--- a/runtime/x86_32/i64_udiv.S
+++ b/runtime/x86_32/i64_udiv.S
@@ -38,15 +38,15 @@
// Unsigned division
-FUNCTION(__i64_udiv)
+FUNCTION(__compcert_i64_udiv)
pushl %ebp
pushl %esi
pushl %edi
- call GLOB(__i64_udivmod)
+ call GLOB(__compcert_i64_udivmod)
movl %esi, %eax
movl %edi, %edx
popl %edi
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_udiv)
+ENDFUNCTION(__compcert_i64_udiv)
diff --git a/runtime/x86_32/i64_udivmod.S b/runtime/x86_32/i64_udivmod.S
index dccfc286..a5d42fa5 100644
--- a/runtime/x86_32/i64_udivmod.S
+++ b/runtime/x86_32/i64_udivmod.S
@@ -45,7 +45,7 @@
// eax:edx is remainder R
// ebp is preserved
-FUNCTION(__i64_udivmod)
+FUNCTION(__compcert_i64_udivmod)
cmpl $0, 32(%esp) // single-word divisor? (DH = 0)
jne 1f
// Special case 64 bits divided by 32 bits
@@ -101,4 +101,4 @@ FUNCTION(__i64_udivmod)
5: decl %esi // adjust Q down by 1
jmp 3b // and redo check & computation of remainder
-ENDFUNCTION(__i64_udivmod)
+ENDFUNCTION(__compcert_i64_udivmod)
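
The register contract above (quotient in esi:edi, remainder in eax:edx, computed in one call) is what lets __compcert_i64_udiv and __compcert_i64_umod be thin wrappers that each keep one half of the result. A hedged C model of the contract and of the single-word special case the first hunk tests for (struct and names are mine):

#include <stdint.h>

typedef struct { uint64_t q, r; } udivmod_t;

// One call yields both quotient and remainder.
static udivmod_t i64_udivmod_model(uint64_t n, uint64_t d) {
    udivmod_t res = { n / d, n % d };
    return res;
}

// Special case when the divisor's high word is zero: two hardware
// 32-bit divisions suffice, as with divl in the assembly.
static udivmod_t i64_udivmod_narrow(uint64_t n, uint32_t d) {
    uint32_t nh = (uint32_t)(n >> 32), nl = (uint32_t)n;
    uint64_t rest = ((uint64_t)(nh % d) << 32) | nl;   // fits: rest < d * 2^32
    udivmod_t res = { ((uint64_t)(nh / d) << 32) | (uint32_t)(rest / d),
                      rest % d };
    return res;
}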
diff --git a/runtime/x86_32/i64_umod.S b/runtime/x86_32/i64_umod.S
index a019df28..241a687b 100644
--- a/runtime/x86_32/i64_umod.S
+++ b/runtime/x86_32/i64_umod.S
@@ -38,14 +38,14 @@
// Unsigned remainder
-FUNCTION(__i64_umod)
+FUNCTION(__compcert_i64_umod)
pushl %ebp
pushl %esi
pushl %edi
- call GLOB(__i64_udivmod)
+ call GLOB(__compcert_i64_udivmod)
popl %edi
popl %esi
popl %ebp
ret
-ENDFUNCTION(__i64_umod)
+ENDFUNCTION(__compcert_i64_umod)
diff --git a/runtime/x86_32/i64_umulh.S b/runtime/x86_32/i64_umulh.S
index 449a0f8b..2dba0975 100644
--- a/runtime/x86_32/i64_umulh.S
+++ b/runtime/x86_32/i64_umulh.S
@@ -45,7 +45,7 @@
// X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
-FUNCTION(__i64_umulh)
+FUNCTION(__compcert_i64_umulh)
pushl %esi
pushl %edi
movl XL, %eax
@@ -70,5 +70,5 @@ FUNCTION(__i64_umulh)
popl %edi
popl %esi
ret
-ENDFUNCTION(__i64_umulh)
+ENDFUNCTION(__compcert_i64_umulh)
diff --git a/runtime/x86_32/i64_utod.S b/runtime/x86_32/i64_utod.S
index 428a3b94..d7ec582f 100644
--- a/runtime/x86_32/i64_utod.S
+++ b/runtime/x86_32/i64_utod.S
@@ -38,7 +38,7 @@
// Conversion unsigned long -> double-precision float
-FUNCTION(__i64_utod)
+FUNCTION(__compcert_i64_utod)
fildll 4(%esp) // convert as if signed
cmpl $0, 8(%esp) // is argument >= 2^63?
jns 1f
@@ -52,4 +52,4 @@ FUNCTION(__i64_utod)
.p2align 2
LC1: .long 0x5f800000 // 2^64 in single precision
-ENDFUNCTION(__i64_utod)
+ENDFUNCTION(__compcert_i64_utod)
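
Both unsigned conversions share one trick: fildll reads the bits as a signed integer, and when the argument's top bit was set (the jns test), the constant 2^64 is added to compensate. A hedged C model of the control flow (name is mine; the assembly adds at extended precision and rounds once, whereas this plain-double sketch can round twice for some inputs):

#include <stdint.h>

double i64_utod_model(uint64_t x) {
    double d = (double)(int64_t)x;      // fildll: bits read as signed
    if ((int64_t)x < 0)                 // the jns test: argument >= 2^63
        d += 18446744073709551616.0;    // LC1: add 2^64
    return d;
}

i64_utof immediately below applies the identical correction with the same LC1 constant.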
diff --git a/runtime/x86_32/i64_utof.S b/runtime/x86_32/i64_utof.S
index 0b58f48b..858caa37 100644
--- a/runtime/x86_32/i64_utof.S
+++ b/runtime/x86_32/i64_utof.S
@@ -38,7 +38,7 @@
// Conversion unsigned long -> single-precision float
-FUNCTION(__i64_utof)
+FUNCTION(__compcert_i64_utof)
fildll 4(%esp) // convert as if signed
cmpl $0, 8(%esp) // is argument >= 2^63?
jns 1f
@@ -52,4 +52,4 @@ FUNCTION(__i64_utof)
.p2align 2
LC1: .long 0x5f800000 // 2^64 in single precision
-ENDFUNCTION(__i64_utof)
+ENDFUNCTION(__compcert_i64_utof)