about summary refs log tree commit diff stats
path: root/runtime/powerpc
diff options
context:
space:
mode:
author    Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
committer Bernhard Schommer <bernhardschommer@gmail.com>  2017-08-25 14:55:22 +0200
commit    4affb2b02e486681b39add0dbaf4f873a91885c8 (patch)
tree      06b045ee98934b00c341071758c9b905ecad057f /runtime/powerpc
parent    95ed4ea7df3e4b05d623afb9cb65f0eb2653361b (diff)
download  compcert-kvx-4affb2b02e486681b39add0dbaf4f873a91885c8.tar.gz
          compcert-kvx-4affb2b02e486681b39add0dbaf4f873a91885c8.zip
Prefixed runtime functions.
The runtime functions are prefixed with compcert in order to avoid potential clashes with runtime/builtin functions of other compilers. Bug 22062
Diffstat (limited to 'runtime/powerpc')
-rw-r--r--  runtime/powerpc/i64_dtos.s     |  8
-rw-r--r--  runtime/powerpc/i64_dtou.s     |  8
-rw-r--r--  runtime/powerpc/i64_sar.s      |  8
-rw-r--r--  runtime/powerpc/i64_sdiv.s     | 10
-rw-r--r--  runtime/powerpc/i64_shl.s      |  8
-rw-r--r--  runtime/powerpc/i64_shr.s      |  8
-rw-r--r--  runtime/powerpc/i64_smod.s     | 10
-rw-r--r--  runtime/powerpc/i64_smulh.s    |  8
-rw-r--r--  runtime/powerpc/i64_stod.s     |  8
-rw-r--r--  runtime/powerpc/i64_stof.s     | 10
-rw-r--r--  runtime/powerpc/i64_udiv.s     | 10
-rw-r--r--  runtime/powerpc/i64_udivmod.s  | 18
-rw-r--r--  runtime/powerpc/i64_umod.s     | 10
-rw-r--r--  runtime/powerpc/i64_umulh.s    |  8
-rw-r--r--  runtime/powerpc/i64_utod.s     |  8
-rw-r--r--  runtime/powerpc/i64_utof.s     | 10
16 files changed, 75 insertions, 75 deletions
diff --git a/runtime/powerpc/i64_dtos.s b/runtime/powerpc/i64_dtos.s
index 9b1288f4..85c60b27 100644
--- a/runtime/powerpc/i64_dtos.s
+++ b/runtime/powerpc/i64_dtos.s
@@ -39,8 +39,8 @@
### Conversion from double float to signed long
.balign 16
- .globl __i64_dtos
-__i64_dtos:
+ .globl __compcert_i64_dtos
+__compcert_i64_dtos:
stfdu f1, -16(r1) # extract LO (r4) and HI (r3) halves of double
lwz r3, 0(r1)
lwz r4, 4(r1)
@@ -95,6 +95,6 @@ __i64_dtos:
5: lis r3, 0x8000 # result is MIN_SINT = 0x8000_0000
li r4, 0
blr
- .type __i64_dtos, @function
- .size __i64_dtos, .-__i64_dtos
+ .type __compcert_i64_dtos, @function
+ .size __compcert_i64_dtos, .-__compcert_i64_dtos
\ No newline at end of file
diff --git a/runtime/powerpc/i64_dtou.s b/runtime/powerpc/i64_dtou.s
index 78cd08b1..67a721d4 100644
--- a/runtime/powerpc/i64_dtou.s
+++ b/runtime/powerpc/i64_dtou.s
@@ -39,8 +39,8 @@
### Conversion from double float to unsigned long
.balign 16
- .globl __i64_dtou
-__i64_dtou:
+ .globl __compcert_i64_dtou
+__compcert_i64_dtou:
stfdu f1, -16(r1) # extract LO (r4) and HI (r3) halves of double
lwz r3, 0(r1)
lwz r4, 4(r1)
@@ -86,7 +86,7 @@ __i64_dtou:
2: li r3, -1 # result is MAX_UINT
li r4, -1
blr
- .type __i64_dtou, @function
- .size __i64_dtou, .-__i64_dtou
+ .type __compcert_i64_dtou, @function
+ .size __compcert_i64_dtou, .-__compcert_i64_dtou
\ No newline at end of file
diff --git a/runtime/powerpc/i64_sar.s b/runtime/powerpc/i64_sar.s
index 0fd410d4..c7da448f 100644
--- a/runtime/powerpc/i64_sar.s
+++ b/runtime/powerpc/i64_sar.s
@@ -39,8 +39,8 @@
# Shift right signed
.balign 16
- .globl __i64_sar
-__i64_sar:
+ .globl __compcert_i64_sar
+__compcert_i64_sar:
andi. r5, r5, 63 # take amount modulo 64
cmpwi r5, 32
bge 1f # amount < 32?
@@ -54,7 +54,7 @@ __i64_sar:
sraw r4, r3, r6 # RL = XH >>s (amount - 32)
srawi r3, r3, 31 # RL = sign extension of XH
blr
- .type __i64_sar, @function
- .size __i64_sar, .-__i64_sar
+ .type __compcert_i64_sar, @function
+ .size __compcert_i64_sar, .-__compcert_i64_sar
\ No newline at end of file
diff --git a/runtime/powerpc/i64_sdiv.s b/runtime/powerpc/i64_sdiv.s
index 411ad50c..9787ea3b 100644
--- a/runtime/powerpc/i64_sdiv.s
+++ b/runtime/powerpc/i64_sdiv.s
@@ -39,8 +39,8 @@
### Signed division
.balign 16
- .globl __i64_sdiv
-__i64_sdiv:
+ .globl __compcert_i64_sdiv
+__compcert_i64_sdiv:
mflr r0
stw r0, 4(r1) # save return address in caller's frame
xor r0, r3, r5 # compute sign of result (top bit)
@@ -55,7 +55,7 @@ __i64_sdiv:
xor r5, r5, r0
subfc r6, r0, r6
subfe r5, r0, r5
- bl __i64_udivmod # do unsigned division
+ bl __compcert_i64_udivmod # do unsigned division
lwz r0, 4(r1)
mtlr r0 # restore return address
mfctr r0
@@ -65,7 +65,7 @@ __i64_sdiv:
subfc r4, r0, r6
subfe r3, r0, r5
blr
- .type __i64_sdiv, @function
- .size __i64_sdiv, .-__i64_sdiv
+ .type __compcert_i64_sdiv, @function
+ .size __compcert_i64_sdiv, .-__compcert_i64_sdiv
\ No newline at end of file
diff --git a/runtime/powerpc/i64_shl.s b/runtime/powerpc/i64_shl.s
index d122068b..f6edb6c2 100644
--- a/runtime/powerpc/i64_shl.s
+++ b/runtime/powerpc/i64_shl.s
@@ -39,8 +39,8 @@
# Shift left
.balign 16
- .globl __i64_shl
-__i64_shl:
+ .globl __compcert_i64_shl
+__compcert_i64_shl:
# On PowerPC, shift instructions with amount mod 64 >= 32 return 0
# hi = (hi << amount) | (lo >> (32 - amount)) | (lo << (amount - 32))
# lo = lo << amount
@@ -59,6 +59,6 @@ __i64_shl:
or r3, r3, r0
slw r4, r4, r5
blr
- .type __i64_shl, @function
- .size __i64_shl, .-__i64_shl
+ .type __compcert_i64_shl, @function
+ .size __compcert_i64_shl, .-__compcert_i64_shl
\ No newline at end of file
diff --git a/runtime/powerpc/i64_shr.s b/runtime/powerpc/i64_shr.s
index fb7dc5cc..b634aafd 100644
--- a/runtime/powerpc/i64_shr.s
+++ b/runtime/powerpc/i64_shr.s
@@ -39,8 +39,8 @@
# Shift right unsigned
.balign 16
- .globl __i64_shr
-__i64_shr:
+ .globl __compcert_i64_shr
+__compcert_i64_shr:
# On PowerPC, shift instructions with amount mod 64 >= 32 return 0
# lo = (lo >> amount) | (hi << (32 - amount)) | (hi >> (amount - 32))
# hi = hi >> amount
@@ -59,7 +59,7 @@ __i64_shr:
or r4, r4, r0
srw r3, r3, r5
blr
- .type __i64_shr, @function
- .size __i64_shr, .-__i64_shr
+ .type __compcert_i64_shr, @function
+ .size __compcert_i64_shr, .-__compcert_i64_shr
\ No newline at end of file
diff --git a/runtime/powerpc/i64_smod.s b/runtime/powerpc/i64_smod.s
index df6bfd8e..6b4e1f89 100644
--- a/runtime/powerpc/i64_smod.s
+++ b/runtime/powerpc/i64_smod.s
@@ -39,8 +39,8 @@
## Signed remainder
.balign 16
- .globl __i64_smod
-__i64_smod:
+ .globl __compcert_i64_smod
+__compcert_i64_smod:
mflr r0
stw r0, 4(r1) # save return address in caller's frame
mtctr r3 # save sign of result in CTR (sign of N)
@@ -54,7 +54,7 @@ __i64_smod:
xor r5, r5, r0
subfc r6, r0, r6
subfe r5, r0, r5
- bl __i64_udivmod # do unsigned division
+ bl __compcert_i64_udivmod # do unsigned division
lwz r0, 4(r1)
mtlr r0 # restore return address
mfctr r0
@@ -64,7 +64,7 @@ __i64_smod:
subfc r4, r0, r4
subfe r3, r0, r3
blr
- .type __i64_smod, @function
- .size __i64_smod, .-__i64_smod
+ .type __compcert_i64_smod, @function
+ .size __compcert_i64_smod, .-__compcert_i64_smod
\ No newline at end of file
diff --git a/runtime/powerpc/i64_smulh.s b/runtime/powerpc/i64_smulh.s
index f01855f3..73393fce 100644
--- a/runtime/powerpc/i64_smulh.s
+++ b/runtime/powerpc/i64_smulh.s
@@ -44,8 +44,8 @@
# - subtract Y if X < 0
.balign 16
- .globl __i64_smulh
-__i64_smulh:
+ .globl __compcert_i64_smulh
+__compcert_i64_smulh:
# r7:r8:r9 accumulate bits 127:32 of the full unsigned product
mulhwu r9, r4, r6 # r9 = high half of XL.YL
mullw r0, r4, r5 # r0 = low half of XL.YH
@@ -75,6 +75,6 @@ __i64_smulh:
subfc r4, r6, r8 # subtract Y
subfe r3, r5, r7
blr
- .type __i64_smulh, @function
- .size __i64_smulh, .-__i64_smulh
+ .type __compcert_i64_smulh, @function
+ .size __compcert_i64_smulh, .-__compcert_i64_smulh
diff --git a/runtime/powerpc/i64_stod.s b/runtime/powerpc/i64_stod.s
index cca109ba..0c1ab720 100644
--- a/runtime/powerpc/i64_stod.s
+++ b/runtime/powerpc/i64_stod.s
@@ -37,8 +37,8 @@
### Conversion from signed long to double float
.balign 16
- .globl __i64_stod
-__i64_stod:
+ .globl __compcert_i64_stod
+__compcert_i64_stod:
addi r1, r1, -16
lis r5, 0x4330
li r6, 0
@@ -62,6 +62,6 @@ __i64_stod:
fadd f1, f1, f2 # add both to get result
addi r1, r1, 16
blr
- .type __i64_stod, @function
- .size __i64_stod, .-__i64_stod
+ .type __compcert_i64_stod, @function
+ .size __compcert_i64_stod, .-__compcert_i64_stod
diff --git a/runtime/powerpc/i64_stof.s b/runtime/powerpc/i64_stof.s
index 05b36a78..97fa6bb8 100644
--- a/runtime/powerpc/i64_stof.s
+++ b/runtime/powerpc/i64_stof.s
@@ -39,8 +39,8 @@
### Conversion from signed long to single float
.balign 16
- .globl __i64_stof
-__i64_stof:
+ .globl __compcert_i64_stof
+__compcert_i64_stof:
mflr r9
# Check whether -2^53 <= X < 2^53
srawi r5, r3, 31
@@ -59,10 +59,10 @@ __i64_stof:
or r4, r4, r0 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
-1: bl __i64_stod
+1: bl __compcert_i64_stod
mtlr r9
frsp f1, f1
blr
- .type __i64_stof, @function
- .size __i64_stof, .-__i64_stof
+ .type __compcert_i64_stof, @function
+ .size __compcert_i64_stof, .-__compcert_i64_stof
diff --git a/runtime/powerpc/i64_udiv.s b/runtime/powerpc/i64_udiv.s
index 9443d59b..e2da855a 100644
--- a/runtime/powerpc/i64_udiv.s
+++ b/runtime/powerpc/i64_udiv.s
@@ -39,16 +39,16 @@
### Unsigned division
.balign 16
- .globl __i64_udiv
-__i64_udiv:
+ .globl __compcert_i64_udiv
+__compcert_i64_udiv:
mflr r0
stw r0, 4(r1) # save return address in caller's frame
- bl __i64_udivmod # unsigned divide
+ bl __compcert_i64_udivmod # unsigned divide
lwz r0, 4(r1)
mtlr r0 # restore return address
mr r3, r5 # result = quotient
mr r4, r6
blr
- .type __i64_udiv, @function
- .size __i64_udiv, .-__i64_udiv
+ .type __compcert_i64_udiv, @function
+ .size __compcert_i64_udiv, .-__compcert_i64_udiv
diff --git a/runtime/powerpc/i64_udivmod.s b/runtime/powerpc/i64_udivmod.s
index 826d9896..e81c6cef 100644
--- a/runtime/powerpc/i64_udivmod.s
+++ b/runtime/powerpc/i64_udivmod.s
@@ -45,9 +45,9 @@
# Output: quotient Q in (r5,r6), remainder R in (r3,r4)
# Destroys: all integer caller-save registers
- .globl __i64_udivmod
+ .globl __compcert_i64_udivmod
.balign 16
-__i64_udivmod:
+__compcert_i64_udivmod:
cmplwi r5, 0 # DH == 0 ?
stwu r1, -32(r1)
mflr r0
@@ -73,7 +73,7 @@ __i64_udivmod:
srw r6, r6, r8
or r5, r6, r0
# Divide N' by D' to get an approximate quotient Q
- bl __i64_udiv6432 # r3 = quotient, r4 = remainder
+ bl __compcert_i64_udiv6432 # r3 = quotient, r4 = remainder
mr r6, r3 # low half of quotient Q
li r5, 0 # high half of quotient is 0
# Tentative quotient is either correct or one too high
@@ -112,7 +112,7 @@ __i64_udivmod:
mullw r0, r31, r6
subf r3, r0, r3 # NH is remainder of this division
mr r5, r6
- bl __i64_udiv6432 # divide NH : NL by DL
+ bl __compcert_i64_udiv6432 # divide NH : NL by DL
mr r5, r31 # high word of quotient
mr r6, r3 # low word of quotient
# r4 contains low word of remainder
@@ -133,8 +133,8 @@ __i64_udivmod:
addi r1, r1, 32
blr
- .type __i64_udivmod, @function
- .size __i64_udivmod, .-__i64_udivmod
+ .type __compcert_i64_udivmod, @function
+ .size __compcert_i64_udivmod, .-__compcert_i64_udivmod
# Auxiliary division function: 64 bit integer divided by 32 bit integer
# Not exported
@@ -144,7 +144,7 @@ __i64_udivmod:
# Assumes: high word of N is less than D
.balign 16
-__i64_udiv6432:
+__compcert_i64_udiv6432:
# Algorithm 9.3 from Hacker's Delight, section 9.4
# Initially: u1 in r3, u0 in r4, v in r5
# s = __builtin_clz(v);
@@ -230,5 +230,5 @@ __i64_udiv6432:
add r3, r0, r3
blr
- .type __i64_udiv6432, @function
- .size __i64_udiv6432,.-__i64_udiv6432
+ .type __compcert_i64_udiv6432, @function
+ .size __compcert_i64_udiv6432,.-__compcert_i64_udiv6432
diff --git a/runtime/powerpc/i64_umod.s b/runtime/powerpc/i64_umod.s
index a4f23c98..bf8d6121 100644
--- a/runtime/powerpc/i64_umod.s
+++ b/runtime/powerpc/i64_umod.s
@@ -39,9 +39,9 @@
### Unsigned modulus
.balign 16
- .globl __i64_umod
-__i64_umod:
- b __i64_udivmod
- .type __i64_umod, @function
- .size __i64_umod, .-__i64_umod
+ .globl __compcert_i64_umod
+__compcert_i64_umod:
+ b __compcert_i64_udivmod
+ .type __compcert_i64_umod, @function
+ .size __compcert_i64_umod, .-__compcert_i64_umod
diff --git a/runtime/powerpc/i64_umulh.s b/runtime/powerpc/i64_umulh.s
index 1c609466..53b72948 100644
--- a/runtime/powerpc/i64_umulh.s
+++ b/runtime/powerpc/i64_umulh.s
@@ -41,8 +41,8 @@
# X * Y = 2^64 XH.YH + 2^32 (XH.YL + XL.YH) + XL.YL
.balign 16
- .globl __i64_umulh
-__i64_umulh:
+ .globl __compcert_i64_umulh
+__compcert_i64_umulh:
# r7:r8:r9 accumulate bits 127:32 of the full product
mulhwu r9, r4, r6 # r9 = high half of XL.YL
mullw r0, r4, r5 # r0 = low half of XL.YH
@@ -60,6 +60,6 @@ __i64_umulh:
mulhwu r0, r3, r5 # r0 = high half of XH.YH
adde r3, r7, r0
blr
- .type __i64_umulh, @function
- .size __i64_umulh, .-__i64_umulh
+ .type __compcert_i64_umulh, @function
+ .size __compcert_i64_umulh, .-__compcert_i64_umulh
diff --git a/runtime/powerpc/i64_utod.s b/runtime/powerpc/i64_utod.s
index 01a27583..69de6fdb 100644
--- a/runtime/powerpc/i64_utod.s
+++ b/runtime/powerpc/i64_utod.s
@@ -39,8 +39,8 @@
### Conversion from unsigned long to double float
.balign 16
- .globl __i64_utod
-__i64_utod:
+ .globl __compcert_i64_utod
+__compcert_i64_utod:
addi r1, r1, -16
lis r5, 0x4330
li r6, 0
@@ -61,6 +61,6 @@ __i64_utod:
fadd f1, f1, f2 # add both to get result
addi r1, r1, 16
blr
- .type __i64_utod, @function
- .size __i64_utod, .-__i64_utod
+ .type __compcert_i64_utod, @function
+ .size __compcert_i64_utod, .-__compcert_i64_utod
diff --git a/runtime/powerpc/i64_utof.s b/runtime/powerpc/i64_utof.s
index 2617cbda..cdb2f867 100644
--- a/runtime/powerpc/i64_utof.s
+++ b/runtime/powerpc/i64_utof.s
@@ -39,8 +39,8 @@
### Conversion from unsigned long to single float
.balign 16
- .globl __i64_utof
-__i64_utof:
+ .globl __compcert_i64_utof
+__compcert_i64_utof:
mflr r9
# Check whether X < 2^53
andis. r0, r3, 0xFFE0 # test bits 53...63 of X
@@ -55,10 +55,10 @@ __i64_utof:
or r4, r4, r0 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
-1: bl __i64_utod
+1: bl __compcert_i64_utod
mtlr r9
frsp f1, f1
blr
- .type __i64_utof, @function
- .size __i64_utof, .-__i64_utof
+ .type __compcert_i64_utof, @function
+ .size __compcert_i64_utof, .-__compcert_i64_utof