author    Cyril SIX <cyril.six@kalray.eu>  2020-04-01 11:41:16 +0200
committer Cyril SIX <cyril.six@kalray.eu>  2020-04-01 11:41:16 +0200
commit    f54e11c4f80cfa1b05a5c407adb1df86d1470cf7 (patch)
tree      a50c7734ec877e8ee0ac24e81356d6b8ea5b09a8 /runtime
parent    7ad6991534ba4ab10fe29d5456393f45cb4e5605 (diff)
parent    76a4ff8f5b37429a614a2a97f628d9d862c93f46 (diff)
Merge remote-tracking branch 'origin/master' into attempt-fix-mppa-work
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/arm/i64_stof.S        |  9
-rw-r--r--  runtime/powerpc/i64_stof.s    | 17
-rw-r--r--  runtime/powerpc/i64_utof.s    | 10
-rw-r--r--  runtime/powerpc64/i64_utof.s  | 10
4 files changed, 22 insertions(+), 24 deletions(-)
diff --git a/runtime/arm/i64_stof.S b/runtime/arm/i64_stof.S
index bcfa471c..11e00a2a 100644
--- a/runtime/arm/i64_stof.S
+++ b/runtime/arm/i64_stof.S
@@ -39,12 +39,11 @@
@@@ Conversion from signed 64-bit integer to single float
FUNCTION(__compcert_i64_stof)
- @ Check whether -2^53 <= X < 2^53
- ASR r2, Reg0HI, #21
- ASR r3, Reg0HI, #31 @ (r2,r3) = X >> 53
+ @ Check whether -2^53 <= X < 2^53
+ ASR r2, Reg0HI, #21 @ r2 = high 32 bits of X >> 53
+ @ -2^53 <= X < 2^53 iff r2 is -1 or 0, that is, iff r2 + 1 is 0 or 1
adds r2, r2, #1
- adc r3, r3, #0 @ (r2,r3) = X >> 53 + 1
- cmp r3, #2
+ cmp r2, #2
blo 1f
@ X is large enough that double rounding can occur.
@ Avoid it by nudging X away from the points where double rounding
@ occurs (the "round to odd" technique)
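
A minimal C sketch of the simplified range check in this hunk, assuming the
usual split of X into 32-bit halves (Reg0HI is the high word) and an
arithmetic >> on int32_t, as on the targets here; fits_53_bits is a
hypothetical name, not part of the CompCert runtime:

    #include <stdint.h>

    /* -2^53 <= x < 2^53 holds iff x >> 53 is -1 or 0.  That quantity is
       an 11-bit value plus sign, so it fits in 32 bits and equals
       hi >> 21: one 32-bit shift of the high word replaces the old
       two-register 64-bit shift. */
    static int fits_53_bits(int64_t x)
    {
      int32_t hi = (int32_t)(uint32_t)((uint64_t)x >> 32); /* high word (Reg0HI) */
      uint32_t t = (uint32_t)((hi >> 21) + 1);             /* -1 -> 0, 0 -> 1 */
      return t < 2;  /* unsigned compare, mirroring adds/cmp/blo above */
    }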
diff --git a/runtime/powerpc/i64_stof.s b/runtime/powerpc/i64_stof.s
index 97fa6bb8..ea23a1c8 100644
--- a/runtime/powerpc/i64_stof.s
+++ b/runtime/powerpc/i64_stof.s
@@ -43,20 +43,19 @@
__compcert_i64_stof:
mflr r9
# Check whether -2^53 <= X < 2^53
- srawi r5, r3, 31
- srawi r6, r3, 21 # (r5,r6) = X >> 53
- addic r6, r6, 1
- addze r5, r5 # (r5,r6) = (X >> 53) + 1
+ srawi r5, r3, 21 # r5 = high 32 bits of X >> 53
+ # -2^53 <= X < 2^53 iff r5 is -1 or 0, that is, iff r5 + 1 is 0 or 1
+ addi r5, r5, 1
cmplwi r5, 2
blt 1f
# X is large enough that double rounding can occur.
# Avoid it by nudging X away from the points where double rounding
# occurs (the "round to odd" technique)
- rlwinm r0, r4, 0, 21, 31 # extract bits 0 to 11 of X
- addi r0, r0, 0x7FF # r0 = (X & 0x7FF) + 0x7FF
- # bit 12 of r0 is 0 if all low 12 bits of X are 0, 1 otherwise
- # bits 13-31 of r0 are 0
- or r4, r4, r0 # correct bit number 12 of X
+ rlwinm r5, r4, 0, 21, 31 # extract bits 0 to 11 of X
+ addi r5, r5, 0x7FF # r5 = (X & 0x7FF) + 0x7FF
+ # bit 12 of r5 is 0 if all low 12 bits of X are 0, 1 otherwise
+ # bits 13-31 of r5 are 0
+ or r4, r4, r5 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
1: bl __compcert_i64_stod
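
The comments above describe the "round to odd" nudge; here is a minimal C
sketch of the same technique, assuming conversion goes int64 -> double ->
float as in this runtime (i64_stof_sketch is a hypothetical name; the real
code is the assembly above, which finishes via __compcert_i64_stod):

    #include <stdint.h>

    static float i64_stof_sketch(int64_t x)
    {
      if (x < -(1LL << 53) || x >= (1LL << 53)) {
        /* Round x to a multiple of 2^11 using round-to-odd: bit 11
           becomes a sticky bit recording whether bits 0-10 were nonzero,
           exactly as the rlwinm/addi/or/rlwinm sequence does. */
        uint64_t u = (uint64_t)x;
        uint64_t nudge = (u & 0x7FF) + 0x7FF;  /* bit 11 set iff low bits != 0 */
        u = (u | nudge) & ~(uint64_t)0x7FF;    /* set sticky bit, clear bits 0-10 */
        x = (int64_t)u;
        /* x is now a multiple of 2^11 with at most 53 significant bits,
           so the conversion to double below is exact and only the final
           double -> float rounding remains. */
      }
      return (float)(double)x;
    }

Rounding to odd first, then rounding to nearest with several fewer mantissa
bits, composes into a correctly rounded result, which is why one exact
intermediate step plus one rounding suffices here.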
diff --git a/runtime/powerpc/i64_utof.s b/runtime/powerpc/i64_utof.s
index cdb2f867..4a2a172b 100644
--- a/runtime/powerpc/i64_utof.s
+++ b/runtime/powerpc/i64_utof.s
@@ -48,11 +48,11 @@ __compcert_i64_utof:
# X is large enough that double rounding can occur.
# Avoid it by nudging X away from the points where double rounding
# occurs (the "round to odd" technique)
- rlwinm r0, r4, 0, 21, 31 # extract bits 0 to 11 of X
- addi r0, r0, 0x7FF # r0 = (X & 0x7FF) + 0x7FF
- # bit 12 of r0 is 0 if all low 12 bits of X are 0, 1 otherwise
- # bits 13-31 of r0 are 0
- or r4, r4, r0 # correct bit number 12 of X
+ rlwinm r5, r4, 0, 21, 31 # extract bits 0 to 11 of X
+ addi r5, r5, 0x7FF # r5 = (X & 0x7FF) + 0x7FF
+ # bit 12 of r5 is 0 if all low 12 bits of X are 0, 1 otherwise
+ # bits 13-31 of r5 are 0
+ or r4, r4, r5 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
1: bl __compcert_i64_utod
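
A quick sanity check, reusing the i64_stof_sketch shown after the i64_stof
hunk above; the test value is positive, so it exercises the same nudge this
unsigned variant performs:

    #include <stdio.h>
    #include <stdint.h>

    /* x = 2^53 + 2^29 + 1 rounds correctly to the float 2^53 + 2^30,
       but the naive path first rounds x to the double 2^53 + 2^29
       (ties-to-even), which then lies exactly on a float tie and
       rounds down to 2^53: a double-rounding error of 2^30. */
    int main(void)
    {
      int64_t x = (1LL << 53) + (1LL << 29) + 1;
      printf("naive : %a\n", (double)(float)(double)x);   /* 0x1p+53 */
      printf("nudged: %a\n", (double)i64_stof_sketch(x)); /* 0x1.000002p+53 */
      return 0;
    }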
diff --git a/runtime/powerpc64/i64_utof.s b/runtime/powerpc64/i64_utof.s
index cdb2f867..4a2a172b 100644
--- a/runtime/powerpc64/i64_utof.s
+++ b/runtime/powerpc64/i64_utof.s
@@ -48,11 +48,11 @@ __compcert_i64_utof:
# X is large enough that double rounding can occur.
# Avoid it by nudging X away from the points where double rounding
# occurs (the "round to odd" technique)
- rlwinm r0, r4, 0, 21, 31 # extract bits 0 to 11 of X
- addi r0, r0, 0x7FF # r0 = (X & 0x7FF) + 0x7FF
- # bit 12 of r0 is 0 if all low 12 bits of X are 0, 1 otherwise
- # bits 13-31 of r0 are 0
- or r4, r4, r0 # correct bit number 12 of X
+ rlwinm r5, r4, 0, 21, 31 # extract bits 0 to 11 of X
+ addi r5, r5, 0x7FF # r5 = (X & 0x7FF) + 0x7FF
+ # bit 12 of r5 is 0 if all low 12 bits of X are 0, 1 otherwise
+ # bits 13-31 of r5 are 0
+ or r4, r4, r5 # correct bit number 12 of X
rlwinm r4, r4, 0, 0, 20 # set to 0 bits 0 to 11 of X
# Convert to double, then round to single
1: bl __compcert_i64_utod
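
The only functional change in the two i64_utof hunks (and part of the
i64_stof one above) is moving the temporary from r0 to r5. A plausible
motivation, sketched here from the standard PowerPC ISA semantics (the C
function is illustrative, not from the sources): in addi, an rA field of 0
denotes the constant 0 rather than register r0, so the old
"addi r0, r0, 0x7FF" computed plain 0x7FF and discarded the bits just
extracted, leaving the sticky bit never set.

    #include <stdint.h>

    /* Simplified addi semantics: an rA field of 0 reads as the literal 0. */
    static uint32_t addi(int ra_field, uint32_t ra_value, int16_t simm)
    {
      uint32_t base = (ra_field == 0) ? 0 : ra_value;
      return base + (uint32_t)(int32_t)simm;  /* SIMM is sign-extended */
    }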