From 54f97d1988f623ba7422e13a504caeb5701ba93c Mon Sep 17 00:00:00 2001
From: Xavier Leroy
Date: Fri, 21 Aug 2015 11:05:36 +0200
Subject: Refactoring of builtins and annotations in the back-end.

Before, the back-end languages had distinct instructions
- Iannot for annotations, taking structured expressions (annot_arg)
  as arguments, and producing no results;
- Ibuiltin for other builtins, using simple pseudoregs/locations/registers
  as arguments and results.

This branch enriches Ibuiltin instructions so that they take structured
expressions (builtin_arg and builtin_res) as arguments and results.
This way,
- Annotations fit the general pattern of builtin functions, so Iannot
  instructions are removed.
- EF_vload_global and EF_vstore_global become useless, as the same
  optimization can be achieved by EF_vload/vstore taking a structured
  argument of the "address of global" kind.
- Better code can be generated for builtin_memcpy between stack
  locations, or volatile accesses to stack locations.

Finally, this commit also introduces a new kind of external function,
EF_debug, which is like EF_annot but produces no observable events.
It will be used later to transport debug info through the back-end,
without preventing optimizations.
---
 ia32/Asmexpand.ml | 168 +++++++++++++++++++++++++++---------------------------
 1 file changed, 84 insertions(+), 84 deletions(-)

(limited to 'ia32/Asmexpand.ml')

diff --git a/ia32/Asmexpand.ml b/ia32/Asmexpand.ml
index e07672a6..9d8260b7 100644
--- a/ia32/Asmexpand.ml
+++ b/ia32/Asmexpand.ml
@@ -59,77 +59,83 @@ let sp_adjustment sz =
 (* Handling of annotations *)

 let expand_annot_val txt targ args res =
-  emit (Pannot (EF_annot(txt,[targ]), List.map (fun r -> AA_base r) args));
+  emit (Pbuiltin (EF_annot(txt,[targ]), args, BR_none));
   match args, res with
-  | [IR src], [IR dst] ->
+  | [BA(IR src)], BR(IR dst) ->
      if dst <> src then emit (Pmov_rr (dst,src))
-  | [FR src], [FR dst] ->
+  | [BA(FR src)], BR(FR dst) ->
      if dst <> src then emit (Pmovsd_ff (dst,src))
   | _, _ ->
      assert false
-
+(* Translate a builtin argument into an addressing mode *)
+
+let addressing_of_builtin_arg = function
+  | BA (IR r) -> Addrmode(Some r, None, Coq_inl Integers.Int.zero)
+  | BA_addrstack ofs -> Addrmode(Some ESP, None, Coq_inl ofs)
+  | BA_addrglobal(id, ofs) -> Addrmode(None, None, Coq_inr(id, ofs))
+  | _ -> assert false
+
+let offset_addressing (Addrmode(base, ofs, cst)) delta =
+  Addrmode(base, ofs,
+           match cst with
+           | Coq_inl n -> Coq_inl(Integers.Int.add n delta)
+           | Coq_inr(id, n) -> Coq_inr(id, Integers.Int.add n delta))
+
 (* Handling of memcpy *)

 (* Unaligned memory accesses are quite fast on IA32, so use large
    memory accesses regardless of alignment. *)

 let expand_builtin_memcpy_small sz al src dst =
-  assert (src = EDX && dst = EAX);
-  let rec copy ofs sz =
+  let rec copy src dst sz =
     if sz >= 8 && !Clflags.option_ffpu then begin
-      emit (Pmovq_rm (XMM7,Addrmode (Some src, None, Coq_inl ofs)));
-      emit (Pmovq_mr (Addrmode (Some src, None, Coq_inl ofs),XMM7));
-      copy (Int.add ofs _8) (sz - 8)
+      emit (Pmovq_rm (XMM7, src));
+      emit (Pmovq_mr (dst, XMM7));
+      copy (offset_addressing src _8) (offset_addressing dst _8) (sz - 8)
     end else if sz >= 4 then begin
-      emit (Pmov_rm (ECX,Addrmode (Some src, None, Coq_inl ofs)));
-      emit (Pmov_mr (Addrmode (Some src, None, Coq_inl ofs),ECX));
-      copy (Int.add ofs _4) (sz - 4)
+      emit (Pmov_rm (ECX, src));
+      emit (Pmov_mr (dst, ECX));
+      copy (offset_addressing src _4) (offset_addressing dst _4) (sz - 4)
     end else if sz >= 2 then begin
-      emit (Pmovw_rm (ECX,Addrmode (Some src, None, Coq_inl ofs)));
-      emit (Pmovw_mr (Addrmode (Some src, None, Coq_inl ofs),ECX));
-      copy (Int.add ofs _2) (sz - 2)
+      emit (Pmovw_rm (ECX, src));
+      emit (Pmovw_mr (dst, ECX));
+      copy (offset_addressing src _2) (offset_addressing dst _2) (sz - 2)
     end else if sz >= 1 then begin
-      emit (Pmovb_rm (ECX,Addrmode (Some src, None, Coq_inl ofs)));
-      emit (Pmovb_mr (Addrmode (Some src, None, Coq_inl ofs),ECX));
-      copy (Int.add ofs _1) (sz - 1)
+      emit (Pmovb_rm (ECX, src));
+      emit (Pmovb_mr (dst, ECX));
+      copy (offset_addressing src _1) (offset_addressing dst _1) (sz - 1)
     end in
-  copy _0 sz
+  copy (addressing_of_builtin_arg src) (addressing_of_builtin_arg dst) sz

 let expand_builtin_memcpy_big sz al src dst =
-  assert (src = ESI && dst = EDI);
+  if src <> BA (IR ESI) then emit (Plea (ESI, addressing_of_builtin_arg src));
+  if dst <> BA (IR EDI) then emit (Plea (EDI, addressing_of_builtin_arg dst));
   emit (Pmov_ri (ECX,coqint_of_camlint (Int32.of_int (sz / 4))));
   emit Prep_movsl;
   if sz mod 4 >= 2 then emit Pmovsw;
   if sz mod 2 >= 1 then emit Pmovsb

 let expand_builtin_memcpy sz al args =
-  let (dst, src) =
-    match args with [IR d; IR s] -> (d, s) | _ -> assert false in
+  let (dst, src) = match args with [d; s] -> (d, s) | _ -> assert false in
   if sz <= 32
   then expand_builtin_memcpy_small sz al src dst
   else expand_builtin_memcpy_big sz al src dst

 (* Handling of volatile reads and writes *)

-let offset_addressing (Addrmode(base, ofs, cst)) delta =
-  Addrmode(base, ofs,
-           match cst with
-           | Coq_inl n -> Coq_inl(Integers.Int.add n delta)
-           | Coq_inr(id, n) -> Coq_inr(id, Integers.Int.add n delta))
-
 let expand_builtin_vload_common chunk addr res =
   match chunk, res with
-  | Mint8unsigned, [IR res] ->
+  | Mint8unsigned, BR(IR res) ->
      emit (Pmovzb_rm (res,addr))
-  | Mint8signed, [IR res] ->
+  | Mint8signed, BR(IR res) ->
      emit (Pmovsb_rm (res,addr))
-  | Mint16unsigned, [IR res] ->
+  | Mint16unsigned, BR(IR res) ->
      emit (Pmovzw_rm (res,addr))
-  | Mint16signed, [IR res] ->
+  | Mint16signed, BR(IR res) ->
      emit (Pmovsw_rm (res,addr))
-  | Mint32, [IR res] ->
+  | Mint32, BR(IR res) ->
      emit (Pmov_rm (res,addr))
-  | Mint64, [IR res1; IR res2] ->
+  | Mint64, BR_longofwords(BR(IR res1), BR(IR res2)) ->
      let addr' = offset_addressing addr (coqint_of_camlint 4l) in
      if not (Asmgen.addressing_mentions addr res2) then begin
        emit (Pmov_rm (res2,addr));
@@ -138,60 +144,51 @@ let expand_builtin_vload_common chunk addr res =
        emit (Pmov_rm (res1,addr'));
        emit (Pmov_rm (res2,addr))
      end
-  | Mfloat32, [FR res] ->
+  | Mfloat32, BR(FR res) ->
      emit (Pmovss_fm (res,addr))
-  | Mfloat64, [FR res] ->
+  | Mfloat64, BR(FR res) ->
      emit (Pmovsd_fm (res,addr))
   | _ ->
      assert false

 let expand_builtin_vload chunk args res =
   match args with
-  | [IR addr] ->
-      expand_builtin_vload_common chunk (Addrmode (Some addr,None, Coq_inl _0)) res
+  | [addr] ->
+      expand_builtin_vload_common chunk (addressing_of_builtin_arg addr) res
   | _ -> assert false
-let expand_builtin_vload_global chunk id ofs args res =
-  expand_builtin_vload_common chunk (Addrmode(None, None, Coq_inr(id,ofs))) res
-
 let expand_builtin_vstore_common chunk addr src tmp =
   match chunk, src with
-  | (Mint8signed | Mint8unsigned), [IR src] ->
+  | (Mint8signed | Mint8unsigned), BA(IR src) ->
      if Asmgen.low_ireg src then
        emit (Pmovb_mr (addr,src))
      else begin
        emit (Pmov_rr (tmp,src));
        emit (Pmovb_mr (addr,tmp))
      end
-  | (Mint16signed | Mint16unsigned), [IR src] ->
+  | (Mint16signed | Mint16unsigned), BA(IR src) ->
      emit (Pmovw_mr (addr,src))
-  | Mint32, [IR src] ->
+  | Mint32, BA(IR src) ->
      emit (Pmov_mr (addr,src))
-  | Mint64, [IR src1; IR src2] ->
+  | Mint64, BA_longofwords(BA(IR src1), BA(IR src2)) ->
      let addr' = offset_addressing addr (coqint_of_camlint 4l) in
      emit (Pmov_mr (addr,src2));
      emit (Pmov_mr (addr',src1))
-  | Mfloat32, [FR src] ->
+  | Mfloat32, BA(FR src) ->
      emit (Pmovss_mf (addr,src))
-  | Mfloat64, [FR src] ->
+  | Mfloat64, BA(FR src) ->
      emit (Pmovsd_mf (addr,src))
   | _ ->
      assert false

 let expand_builtin_vstore chunk args =
   match args with
-  | IR addr :: src ->
-      expand_builtin_vstore_common chunk
-        (Addrmode(Some addr, None, Coq_inl Integers.Int.zero)) src
-        (if addr = EAX then ECX else EAX)
+  | [addr; src] ->
+      let addr = addressing_of_builtin_arg addr in
+      expand_builtin_vstore_common chunk addr src
+        (if Asmgen.addressing_mentions addr EAX then ECX else EAX)
   | _ -> assert false
-
-
-let expand_builtin_vstore_global chunk id ofs args =
-  expand_builtin_vstore_common chunk
-    (Addrmode(None, None, Coq_inr(id,ofs))) args EAX
-
 (* Handling of varargs *)
@@ -210,27 +207,27 @@ let expand_builtin_va_start r =
 let expand_builtin_inline name args res =
   match name, args, res with
   (* Integer arithmetic *)
-  | ("__builtin_bswap"| "__builtin_bswap32"), [IR a1], [IR res] ->
+  | ("__builtin_bswap"| "__builtin_bswap32"), [BA(IR a1)], BR(IR res) ->
      if a1 <> res then emit (Pmov_rr (res,a1));
      emit (Pbswap res)
-  | "__builtin_bswap16", [IR a1], [IR res] ->
+  | "__builtin_bswap16", [BA(IR a1)], BR(IR res) ->
      if a1 <> res then emit (Pmov_rr (res,a1));
      emit (Prolw_8 res)
-  | "__builtin_clz", [IR a1], [IR res] ->
+  | "__builtin_clz", [BA(IR a1)], BR(IR res) ->
      emit (Pbslr (a1,res));
      emit (Pxor_ri(res,coqint_of_camlint 31l))
-  | "__builtin_ctz", [IR a1], [IR res] ->
+  | "__builtin_ctz", [BA(IR a1)], BR(IR res) ->
      emit (Pbsfl (a1,res))
   (* Float arithmetic *)
-  | "__builtin_fabs", [FR a1], [FR res] ->
+  | "__builtin_fabs", [BA(FR a1)], BR(FR res) ->
      if a1 <> res then emit (Pmovsd_ff (a1,res));
      emit (Pabsd res) (* This ensures that need_masks is set to true *)
-  | "__builtin_fsqrt", [FR a1], [FR res] ->
+  | "__builtin_fsqrt", [BA(FR a1)], BR(FR res) ->
      emit (Psqrtsd (a1,res))
-  | "__builtin_fmax", [FR a1; FR a2], [FR res] ->
+  | "__builtin_fmax", [BA(FR a1); BA(FR a2)], BR(FR res) ->
      if res = a1 then
        emit (Pmaxsd (a2,res))
      else if res = a2 then
@@ -239,7 +236,7 @@ let expand_builtin_inline name args res =
        emit (Pmovsd_ff (a1,res));
        emit (Pmaxsd (a2,res))
      end
-  | "__builtin_fmin", [FR a1; FR a2], [FR res] ->
+  | "__builtin_fmin", [BA(FR a1); BA(FR a2)], BR(FR res) ->
      if res = a1 then
        emit (Pminsd (a2,res))
      else if res = a2 then
@@ -248,7 +245,7 @@ let expand_builtin_inline name args res =
        emit (Pmovsd_ff (a1,res));
        emit (Pminsd (a2,res))
      end
-  | "__builtin_fmadd", [FR a1; FR a2; FR a3], [FR res] ->
+  | "__builtin_fmadd", [BA(FR a1); BA(FR a2); BA(FR a3)], BR(FR res) ->
      if res = a1 then
        emit (Pfmadd132 (a2,a3,res))
      else if res = a2 then
@@ -259,7 +256,7 @@ let expand_builtin_inline name args res =
        emit (Pmovsd_ff (a2,res));
        emit (Pfmadd231 (a1,a2,res))
      end
-  |"__builtin_fmsub", [FR a1; FR a2; FR a3], [FR res] ->
+  |"__builtin_fmsub", [BA(FR a1); BA(FR a2); BA(FR a3)], BR(FR res) ->
      if res = a1 then
        emit (Pfmsub132 (a2,a3,res))
      else if res = a2 then
@@ -270,7 +267,7 @@ let expand_builtin_inline name args res =
        emit (Pmovsd_ff (a2,res));
        emit (Pfmsub231 (a1,a2,res))
      end
-  | "__builtin_fnmadd", [FR a1; FR a2; FR a3], [FR res] ->
+  | "__builtin_fnmadd", [BA(FR a1); BA(FR a2); BA(FR a3)], BR(FR res) ->
      if res = a1 then
        emit (Pfnmadd132 (a2,a3,res))
      else if res = a2 then
@@ -281,7 +278,7 @@ let expand_builtin_inline name args res =
        emit (Pmovsd_ff (a2,res));
        emit (Pfnmadd231 (a1,a2,res))
      end
-  |"__builtin_fnmsub", [FR a1; FR a2; FR a3], [FR res] ->
+  |"__builtin_fnmsub", [BA(FR a1); BA(FR a2); BA(FR a3)], BR(FR res) ->
      if res = a1 then
        emit (Pfnmsub132 (a2,a3,res))
      else if res = a2 then
@@ -293,32 +290,38 @@ let expand_builtin_inline name args res =
        emit (Pfnmsub231 (a1,a2,res))
      end
   (* 64-bit integer arithmetic *)
-  | "__builtin_negl", [IR ah; IR al], [IR rh; IR rl] ->
+  | "__builtin_negl", [BA_longofwords(BA(IR ah), BA(IR al))],
+                      BR_longofwords(BR(IR rh), BR(IR rl)) ->
      assert (ah = EDX && al = EAX && rh = EDX && rl = EAX);
      emit (Pneg EAX); emit (Padcl_ir (_0,EDX)); emit (Pneg EDX)
-  | "__builtin_addl", [IR ah; IR al; IR bh; IR bl], [IR rh; IR rl] ->
+  | "__builtin_addl", [BA_longofwords(BA(IR ah), BA(IR al));
+                       BA_longofwords(BA(IR bh), BA(IR bl))],
+                      BR_longofwords(BR(IR rh), BR(IR rl)) ->
      assert (ah = EDX && al = EAX && bh = ECX && bl = EBX && rh = EDX && rl = EAX);
      emit (Paddl (EBX,EAX)); emit (Padcl_rr (ECX,EDX))
-  | "__builtin_subl", [IR ah; IR al; IR bh; IR bl], [IR rh; IR rl] ->
+  | "__builtin_subl", [BA_longofwords(BA(IR ah), BA(IR al));
+                       BA_longofwords(BA(IR bh), BA(IR bl))],
+                      BR_longofwords(BR(IR rh), BR(IR rl)) ->
      assert (ah = EDX && al = EAX && bh = ECX && bl = EBX && rh = EDX && rl = EAX);
      emit (Psub_rr (EBX,EAX)); emit (Psbbl (ECX,EDX))
-  | "__builtin_mull", [IR a; IR b], [IR rh; IR rl] ->
+  | "__builtin_mull", [BA(IR a); BA(IR b)],
+                      BR_longofwords(BR(IR rh), BR(IR rl)) ->
      assert (a = EAX && b = EDX && rh = EDX && rl = EAX);
      emit (Pmul_r EDX)
   (* Memory accesses *)
-  | "__builtin_read16_reversed", [IR a1], [IR res] ->
+  | "__builtin_read16_reversed", [BA(IR a1)], BR(IR res) ->
      let addr = Addrmode(Some a1,None,Coq_inl _0) in
      emit (Pmovzw_rm (res,addr));
      emit (Prolw_8 res)
-  | "__builtin_read32_reversed", [IR a1], [IR res] ->
+  | "__builtin_read32_reversed", [BA(IR a1)], BR(IR res) ->
      let addr = Addrmode(Some a1,None,Coq_inl _0) in
      emit (Pmov_rm (res,addr));
      emit (Pbswap res)
-  | "__builtin_write16_reversed", [IR a1; IR a2], _ ->
+  | "__builtin_write16_reversed", [BA(IR a1); BA(IR a2)], _ ->
      let tmp = if a1 = ECX then EDX else ECX in
      let addr = Addrmode(Some a1,None,Coq_inl _0) in
      if a2 <> tmp then
@@ -326,7 +329,7 @@ let expand_builtin_inline name args res =
        emit (Pxchg (tmp,tmp));
      emit (Pmovw_mr (addr,tmp))
   (* Vararg stuff *)
-  | "__builtin_va_start", [IR a], _ ->
+  | "__builtin_va_start", [BA(IR a)], _ ->
      expand_builtin_va_start a
   (* Synchronization *)
   | "__builtin_membar", [], _ ->
@@ -335,7 +338,7 @@ let expand_builtin_inline name args res =
   | _ ->
      invalid_arg ("unrecognized builtin " ^ name)

-
+(* Expansion of instructions *)

 let expand_instruction instr =
   match instr with
@@ -361,18 +364,15 @@ let expand_instruction instr =
         expand_builtin_vload chunk args res
      | EF_vstore chunk ->
         expand_builtin_vstore chunk args
-     | EF_vload_global(chunk, id, ofs) ->
-        expand_builtin_vload_global chunk id ofs args res
-     | EF_vstore_global(chunk, id, ofs) ->
-        expand_builtin_vstore_global chunk id ofs args
      | EF_memcpy(sz, al) ->
         expand_builtin_memcpy (Int32.to_int (camlint_of_coqint sz))
           (Int32.to_int (camlint_of_coqint al)) args
      | EF_annot_val(txt, targ) ->
         expand_annot_val txt targ args res
-     | EF_inline_asm(txt, sg, clob) ->
+     | EF_annot _ | EF_debug _ | EF_inline_asm _ ->
         emit instr
-     | _ -> assert false
+     | _ ->
+        assert false
      end
   | _ -> emit instr
-- cgit
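To make the new interface concrete, here is a minimal, self-contained OCaml sketch of the shapes the patch relies on. The constructor names (BA, BA_addrstack, BA_addrglobal, BA_longofwords, BR, BR_none, BR_longofwords) mirror the patch, but the type definitions below are simplified stand-ins for the extracted Coq types: registers, addressing modes and offsets are faked, so treat this as an illustration of the idea rather than CompCert's actual definitions.

```ocaml
(* Simplified stand-ins for the register and addressing-mode types used in
   ia32/Asmexpand.ml; the real ones come from the extracted Coq development. *)
type ireg = EAX | ECX | EDX | EBX | ESP | ESI | EDI
type freg = XMM0 | XMM7
type preg = IR of ireg | FR of freg

type constant = Cint of int32 | Csymbol of string * int32
type addrmode = Addrmode of ireg option * (ireg * int) option * constant

(* Structured builtin arguments and results, in the spirit of the new
   builtin_arg / builtin_res types introduced by this commit. *)
type builtin_arg =
  | BA of preg                                   (* value held in a register *)
  | BA_addrstack of int32                        (* address of a stack slot *)
  | BA_addrglobal of string * int32              (* address of a global symbol *)
  | BA_longofwords of builtin_arg * builtin_arg  (* 64-bit value as hi/lo words *)

type builtin_res =
  | BR of preg
  | BR_none
  | BR_longofwords of builtin_res * builtin_res

(* The key trick used throughout the patch: an address-like argument is turned
   directly into an x86 addressing mode, so volatile accesses to stack slots or
   globals need no separate address computation. *)
let addressing_of_builtin_arg = function
  | BA (IR r) -> Addrmode (Some r, None, Cint 0l)
  | BA_addrstack ofs -> Addrmode (Some ESP, None, Cint ofs)
  | BA_addrglobal (id, ofs) -> Addrmode (None, None, Csymbol (id, ofs))
  | _ -> invalid_arg "addressing_of_builtin_arg"

(* e.g. addressing_of_builtin_arg (BA_addrglobal ("tbl", 8l)) denotes the
   operand "tbl + 8", which is why EF_vload_global/EF_vstore_global become
   redundant once EF_vload/EF_vstore accept structured arguments. *)
```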
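On the memcpy side, expand_builtin_memcpy_small unrolls copies of at most 32 bytes into the widest moves available (8-byte SSE moves when the FPU may be used, then 4-, 2- and 1-byte moves), now addressing source and destination directly through the builtin arguments. The fragment below illustrates only that size decomposition; memcpy_moves is a hypothetical helper written for this note, not code from the patch.

```ocaml
(* Hypothetical helper: compute the (offset, width) list of moves that an
   unrolled small memcpy would emit, widest moves first. *)
let memcpy_moves ~fpu_available sz =
  let rec go ofs sz acc =
    if sz >= 8 && fpu_available then go (ofs + 8) (sz - 8) ((ofs, 8) :: acc)
    else if sz >= 4 then go (ofs + 4) (sz - 4) ((ofs, 4) :: acc)
    else if sz >= 2 then go (ofs + 2) (sz - 2) ((ofs, 2) :: acc)
    else if sz >= 1 then go (ofs + 1) (sz - 1) ((ofs, 1) :: acc)
    else List.rev acc
  in
  go 0 sz []

(* memcpy_moves ~fpu_available:true 15
   = [(0, 8); (8, 4); (12, 2); (14, 1)]
   i.e. one movq, one movl, one movw and one movb, mirroring the
   8/4/2/1-byte cascade in expand_builtin_memcpy_small. *)
```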