author    David Monniaux <david.monniaux@univ-grenoble-alpes.fr>  2020-11-18 21:07:29 +0100
committer David Monniaux <david.monniaux@univ-grenoble-alpes.fr>  2020-11-18 21:07:29 +0100
commit    8384d27c122ec4ca4b7ad0f524df52b61a49c66a (patch)
tree      d86ff8780c4435d3b4fe92b5251e0f9b447b86c7 /runtime
parent    362bdda28ca3c4dcc992575cbbe9400b64425990 (diff)
parent    e6e036b3f285d2f3ba2a5036a413eb9c7d7534cd (diff)
Merge branch 'master' (Absint 3.8) into kvx-work-merge3.8
Diffstat (limited to 'runtime')
-rw-r--r--  runtime/x86_64/i64_dtou.S   10
-rw-r--r--  runtime/x86_64/i64_utod.S   18
-rw-r--r--  runtime/x86_64/i64_utof.S   18
-rw-r--r--  runtime/x86_64/sysdeps.h    18
-rw-r--r--  runtime/x86_64/vararg.S     63
5 files changed, 99 insertions, 28 deletions
diff --git a/runtime/x86_64/i64_dtou.S b/runtime/x86_64/i64_dtou.S
index cc822d67..7f12ae88 100644
--- a/runtime/x86_64/i64_dtou.S
+++ b/runtime/x86_64/i64_dtou.S
@@ -39,13 +39,13 @@
// Conversion double -> unsigned long
FUNCTION(__compcert_i64_dtou)
- ucomisd .LC1(%rip), %xmm0
+ ucomisd .LC1(%rip), FP_ARG_1
jnb 1f
- cvttsd2siq %xmm0, %rax
+ cvttsd2siq FP_ARG_1, INT_RES
ret
-1: subsd .LC1(%rip), %xmm0
- cvttsd2siq %xmm0, %rax
- addq .LC2(%rip), %rax
+1: subsd .LC1(%rip), FP_ARG_1
+ cvttsd2siq FP_ARG_1, INT_RES
+ addq .LC2(%rip), INT_RES
ret
.p2align 3
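
For reference, the conversion this hunk touches follows the usual two-range scheme: values below 2^63 fit the signed conversion instruction directly; larger values are shifted down by 2^63 (the .LC1 constant), converted, then fixed up with the integer constant 2^63 (.LC2). A minimal C sketch of that logic (illustrative only, not part of the patch):

    #include <stdint.h>

    /* Illustrative model of __compcert_i64_dtou. */
    uint64_t dtou_sketch(double x)
    {
        const double two63 = 9223372036854775808.0;   /* 2^63, the .LC1 constant */
        if (x < two63)
            return (uint64_t)(int64_t)x;              /* in signed range: convert directly */
        /* x >= 2^63: shift into signed range, convert, add 2^63 back (.LC2) */
        return (uint64_t)(int64_t)(x - two63) + 0x8000000000000000ULL;
    }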
diff --git a/runtime/x86_64/i64_utod.S b/runtime/x86_64/i64_utod.S
index 62e6e484..4d4870fc 100644
--- a/runtime/x86_64/i64_utod.S
+++ b/runtime/x86_64/i64_utod.S
@@ -39,18 +39,18 @@
// Conversion unsigned long -> double-precision float
FUNCTION(__compcert_i64_utod)
- testq %rdi, %rdi
+ testq INT_ARG_1, INT_ARG_1
js 1f
- pxor %xmm0, %xmm0 // if < 2^63,
- cvtsi2sdq %rdi, %xmm0 // convert as if signed
+ pxor FP_RES, FP_RES // if < 2^63,
+ cvtsi2sdq INT_ARG_1, FP_RES // convert as if signed
ret
1: // if >= 2^63, use round-to-odd trick
- movq %rdi, %rax
+ movq INT_ARG_1, %rax
shrq %rax
- andq $1, %rdi
- orq %rdi, %rax // (arg >> 1) | (arg & 1)
- pxor %xmm0, %xmm0
- cvtsi2sdq %rax, %xmm0 // convert as if signed
- addsd %xmm0, %xmm0 // multiply result by 2.0
+ andq $1, INT_ARG_1
+ orq INT_ARG_1, %rax // (arg >> 1) | (arg & 1)
+ pxor FP_RES, FP_RES
+ cvtsi2sdq %rax, FP_RES // convert as if signed
+ addsd FP_RES, FP_RES // multiply result by 2.0
ret
ENDFUNCTION(__compcert_i64_utod)
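
The unsigned-to-double direction uses the round-to-odd trick visible above: halve the value but OR the shifted-out bit back into bit 0 so it acts as a sticky bit, convert as signed, then double the result. A hedged C sketch of the same logic (names are illustrative):

    #include <stdint.h>

    /* Illustrative model of __compcert_i64_utod. */
    double utod_sketch(uint64_t x)
    {
        if ((int64_t)x >= 0)
            return (double)(int64_t)x;       /* < 2^63: convert as if signed */
        /* >= 2^63: halve with round-to-odd, convert, multiply by 2.0 */
        uint64_t half = (x >> 1) | (x & 1);
        return (double)(int64_t)half * 2.0;
    }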
diff --git a/runtime/x86_64/i64_utof.S b/runtime/x86_64/i64_utof.S
index 63a33920..0e878121 100644
--- a/runtime/x86_64/i64_utof.S
+++ b/runtime/x86_64/i64_utof.S
@@ -39,18 +39,18 @@
// Conversion unsigned long -> single-precision float
FUNCTION(__compcert_i64_utof)
- testq %rdi, %rdi
+ testq INT_ARG_1, INT_ARG_1
js 1f
- pxor %xmm0, %xmm0 // if < 2^63,
- cvtsi2ssq %rdi, %xmm0 // convert as if signed
+ pxor FP_RES, FP_RES // if < 2^63,
+ cvtsi2ssq INT_ARG_1, FP_RES // convert as if signed
ret
1: // if >= 2^63, use round-to-odd trick
- movq %rdi, %rax
+ movq INT_ARG_1, %rax
shrq %rax
- andq $1, %rdi
- orq %rdi, %rax // (arg >> 1) | (arg & 1)
- pxor %xmm0, %xmm0
- cvtsi2ssq %rax, %xmm0 // convert as if signed
- addss %xmm0, %xmm0 // multiply result by 2.0
+ andq $1, INT_ARG_1
+ orq INT_ARG_1, %rax // (arg >> 1) | (arg & 1)
+ pxor FP_RES, FP_RES
+ cvtsi2ssq %rax, FP_RES // convert as if signed
+ addss FP_RES, FP_RES // multiply result by 2.0
ret
ENDFUNCTION(__compcert_i64_utof)
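
The round-to-odd step matters most in this single-precision variant: a plain x >> 1 loses the low bit and can double-round. A small illustrative check, with the value chosen to sit just above a float rounding midpoint (not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t x = (1ULL << 63) + (1ULL << 39) + 1;              /* just above a midpoint */
        float naive = (float)(int64_t)(x >> 1) * 2.0f;             /* drops the low bit: 2^63 */
        float odd   = (float)(int64_t)((x >> 1) | (x & 1)) * 2.0f; /* keeps it sticky: 2^63 + 2^40 */
        printf("naive = %a, round-to-odd = %a\n", naive, odd);     /* they differ */
        return 0;
    }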
diff --git a/runtime/x86_64/sysdeps.h b/runtime/x86_64/sysdeps.h
index e9d456af..aacef8f0 100644
--- a/runtime/x86_64/sysdeps.h
+++ b/runtime/x86_64/sysdeps.h
@@ -63,13 +63,25 @@ _##f:
#if defined(SYS_cygwin)
-#define GLOB(x) _##x
+#define GLOB(x) x
#define FUNCTION(f) \
.text; \
- .globl _##f; \
+ .globl f; \
.align 16; \
-_##f:
+f:
#define ENDFUNCTION(f)
#endif
+
+// Names for argument and result registers
+
+#if defined(SYS_cygwin)
+#define INT_ARG_1 %rcx
+#else
+#define INT_ARG_1 %rdi
+#endif
+#define FP_ARG_1 %xmm0
+#define INT_RES %rax
+#define FP_RES %xmm0
+
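
In short, the new macros paper over the two calling conventions the runtime must support (this summary is editorial, not part of the patch):

                 System V (Linux/BSD/macOS)   Windows x64 (Cygwin)
    INT_ARG_1    %rdi                         %rcx
    FP_ARG_1     %xmm0                        %xmm0
    INT_RES      %rax                         %rax
    FP_RES       %xmm0                        %xmm0

With these, a line such as `cvttsd2siq FP_ARG_1, INT_RES` assembles identically on both ABIs, and only INT_ARG_1 selects a different register.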
diff --git a/runtime/x86_64/vararg.S b/runtime/x86_64/vararg.S
index 9c0d787b..c5225b34 100644
--- a/runtime/x86_64/vararg.S
+++ b/runtime/x86_64/vararg.S
@@ -34,6 +34,12 @@
// Helper functions for variadic functions <stdarg.h>. x86_64 version.
+#include "sysdeps.h"
+
+// ELF ABI
+
+#if defined(SYS_linux) || defined(SYS_bsd) || defined(SYS_macosx)
+
// typedef struct {
// unsigned int gp_offset;
// unsigned int fp_offset;
@@ -60,8 +66,6 @@
// unsigned long long __compcert_va_int64(va_list ap);
// double __compcert_va_float64(va_list ap);
-#include "sysdeps.h"
-
FUNCTION(__compcert_va_int32)
movl 0(%rdi), %edx // edx = gp_offset
cmpl $48, %edx
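
For context, the System V helper shown above walks the standard va_list structure: the first 48 bytes of integer arguments live in the register save area, and the rest spill to the overflow area. An illustrative C model of that logic (names hypothetical):

    #include <stdint.h>

    /* Illustrative model of the System V __compcert_va_int32 logic. */
    typedef struct {
        unsigned int gp_offset;
        unsigned int fp_offset;
        char *overflow_arg_area;
        char *reg_save_area;
    } sysv_va_list;

    static uint32_t va_int32_sketch(sysv_va_list *ap)
    {
        if (ap->gp_offset < 48) {                          /* still within the 6 GP slots */
            uint32_t v = *(uint32_t *)(ap->reg_save_area + ap->gp_offset);
            ap->gp_offset += 8;
            return v;
        }
        uint32_t v = *(uint32_t *)ap->overflow_arg_area;   /* spilled to the stack */
        ap->overflow_arg_area += 8;
        return v;
    }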
@@ -146,3 +150,58 @@ FUNCTION(__compcert_va_saveregs)
movaps %xmm7, 160(%r10)
1: ret
ENDFUNCTION(__compcert_va_saveregs)
+
+#endif
+
+// Windows ABI
+
+#if defined(SYS_cygwin)
+
+// typedef void * va_list;
+// unsigned int __compcert_va_int32(va_list * ap);
+// unsigned long long __compcert_va_int64(va_list * ap);
+// double __compcert_va_float64(va_list * ap);
+
+FUNCTION(__compcert_va_int32) // %rcx = pointer to argument pointer
+ movq 0(%rcx), %rdx // %rdx = current argument pointer
+ movl 0(%rdx), %eax // load the int32 value there
+ addq $8, %rdx // increment argument pointer by 8
+ movq %rdx, 0(%rcx)
+ ret
+ENDFUNCTION(__compcert_va_int32)
+
+FUNCTION(__compcert_va_int64) // %rcx = pointer to argument pointer
+ movq 0(%rcx), %rdx // %rdx = current argument pointer
+ movq 0(%rdx), %rax // load the int64 value there
+ addq $8, %rdx // increment argument pointer by 8
+ movq %rdx, 0(%rcx)
+ ret
+ENDFUNCTION(__compcert_va_int64)
+
+FUNCTION(__compcert_va_float64) // %rcx = pointer to argument pointer
+ movq 0(%rcx), %rdx // %rdx = current argument pointer
+ movsd 0(%rdx), %xmm0 // load the float64 value there
+ addq $8, %rdx // increment argument pointer by 8
+ movq %rdx, 0(%rcx)
+ ret
+ENDFUNCTION(__compcert_va_float64)
+
+FUNCTION(__compcert_va_composite)
+ jmp GLOB(__compcert_va_int64) // by-ref convention, FIXME
+ENDFUNCTION(__compcert_va_composite)
+
+// Save the arguments passed in registers to the stack at the beginning of a
+// vararg function. The caller of the vararg function reserved 32 bytes of
+// stack (the Win64 shadow space) just for this purpose.
+// FP arguments are passed both in FP registers and in integer registers,
+// so it is enough to save the integer registers used for parameter passing.
+
+FUNCTION(__compcert_va_saveregs)
+ movq %rcx, 16(%rsp)
+ movq %rdx, 24(%rsp)
+ movq %r8, 32(%rsp)
+ movq %r9, 40(%rsp)
+ ret
+ENDFUNCTION(__compcert_va_saveregs)
+
+#endif
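
The Windows helpers above reduce to a much simpler model, since va_list is a bare pointer and every variadic slot is 8 bytes. An illustrative C equivalent (names hypothetical, not part of the patch):

    #include <stdint.h>

    typedef char *win_va_list;

    /* Illustrative model of the Windows-ABI __compcert_va_int32. */
    static uint32_t win_va_int32_sketch(win_va_list *ap)
    {
        uint32_t v = *(uint32_t *)*ap;   /* low 32 bits of the current 8-byte slot */
        *ap += 8;                        /* advance to the next slot */
        return v;
    }

    /* Illustrative model of the Windows-ABI __compcert_va_float64. */
    static double win_va_float64_sketch(win_va_list *ap)
    {
        double v = *(double *)*ap;       /* FP varargs also live in the stack slots */
        *ap += 8;
        return v;
    }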