From: Klaus Treichel
Subject: [dotgnu-pnet-commits] libjit ChangeLog jit/jit-gen-x86-64.h jit/jit-r...
Date: Sun, 30 Mar 2008 15:05:14 +0000

CVSROOT:        /cvsroot/dotgnu-pnet
Module name:    libjit
Changes by:     Klaus Treichel <ktreichel>      08/03/30 15:05:14

Modified files:
        .              : ChangeLog 
        jit            : jit-gen-x86-64.h jit-rules-x86-64.ins 

Log message:
        Add additional macros and opcode handling for x86-64.

CVSWeb URLs:
http://cvs.savannah.gnu.org/viewcvs/libjit/ChangeLog?cvsroot=dotgnu-pnet&r1=1.356&r2=1.357
http://cvs.savannah.gnu.org/viewcvs/libjit/jit/jit-gen-x86-64.h?cvsroot=dotgnu-pnet&r1=1.3&r2=1.4
http://cvs.savannah.gnu.org/viewcvs/libjit/jit/jit-rules-x86-64.ins?cvsroot=dotgnu-pnet&r1=1.2&r2=1.3

Patches:
Index: ChangeLog
===================================================================
RCS file: /cvsroot/dotgnu-pnet/libjit/ChangeLog,v
retrieving revision 1.356
retrieving revision 1.357
diff -u -b -r1.356 -r1.357
--- ChangeLog   29 Mar 2008 18:45:24 -0000      1.356
+++ ChangeLog   30 Mar 2008 15:05:13 -0000      1.357
@@ -1,3 +1,12 @@
+2008-03-30  Klaus Treichel  <address@hidden>
+
+       * jit/jit-gen-x86-64.h: Add macros for the test, imul, cwd/cdq/cqo
+       and cmov instructions.
+
+       * jit/jit-rules-x86-64.ins: Add IMUL, IDIV, IDIV_UN, IREM, IREM_UN,
+       LMUL, LDIV, LDIV_UN, LREM and LREM_UN opcodes. Replace the compares
+       with zero that were done with or instructions by test instructions.
+
 2008-03-29  Klaus Treichel  <address@hidden>
 
        * jit/jit-rules-x86.ins: Fix signed division of negative values by a
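
For context on the last ChangeLog item: both "or reg, reg" and "test reg, reg" set SF, ZF and PF from the register value, but test only reads its operands while or also rewrites the destination register, so test is the natural choice for a plain zero check. A minimal standalone sketch of the two encodings (plain C; the emit_test_reg_reg/emit_or_reg_reg helpers are invented here for illustration and are not the libjit macros from the patch below; registers 0-7 only, no REX prefix):

/* Illustrative only: hand-rolled emitters for "test r32, r32" and
 * "or r32, r32"; the real code uses the x86_64_*_emit helpers and
 * REX prefixes from jit-gen-x86-64.h instead. */
#include <stdio.h>

static unsigned char *emit_test_reg_reg(unsigned char *inst, int reg)
{
        *inst++ = 0x85;                                   /* test r/m32, r32 */
        *inst++ = (unsigned char)(0xc0 | (reg << 3) | reg);
        return inst;
}

static unsigned char *emit_or_reg_reg(unsigned char *inst, int reg)
{
        *inst++ = 0x09;                                   /* or r/m32, r32 */
        *inst++ = (unsigned char)(0xc0 | (reg << 3) | reg);
        return inst;
}

int main(void)
{
        unsigned char buf[4], *p = buf;
        p = emit_test_reg_reg(p, 0);      /* test %eax, %eax -> 85 c0 */
        p = emit_or_reg_reg(p, 0);        /* or %eax, %eax   -> 09 c0 */
        printf("%02x %02x / %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        return 0;
}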

Index: jit/jit-gen-x86-64.h
===================================================================
RCS file: /cvsroot/dotgnu-pnet/libjit/jit/jit-gen-x86-64.h,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -b -r1.3 -r1.4
--- jit/jit-gen-x86-64.h        24 Mar 2008 12:42:51 -0000      1.3
+++ jit/jit-gen-x86-64.h        30 Mar 2008 15:05:13 -0000      1.4
@@ -2235,6 +2235,383 @@
        } while(0)
 
 /*
+ * test: and the values and set sf, zf and pf according to the result
+ */
+#define x86_64_test_reg_imm_size(inst, reg, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), 0, 0, (reg)); \
+               if((reg) == X86_64_RAX) { \
+                       x86_64_opcode1_emit((inst), 0xa8, (size)); \
+               } \
+               else \
+               { \
+                       x86_64_opcode1_emit((inst), 0xf6, (size)); \
+                       x86_64_reg_emit((inst), 0, (reg)); \
+               } \
+               x86_64_imm_emit_max32((inst), (imm), (size)); \
+       } while (0)
+
+#define x86_64_test_regp_imm_size(inst, regp, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), 0, 0, (regp)); \
+               x86_64_opcode1_emit((inst), 0xf6, (size)); \
+               x86_64_regp_emit((inst), 0, (regp)); \
+               x86_64_imm_emit_max32((inst), (imm), (size)); \
+       } while (0)
+
+#define x86_64_test_mem_imm_size(inst, mem, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), 0, 0, 0); \
+               x86_64_opcode1_emit((inst), 0xf6, (size)); \
+               x86_64_mem_emit((inst), 0, (mem)); \
+               x86_64_imm_emit_max32((inst), (imm), (size)); \
+       } while (0)
+
+#define x86_64_test_membase_imm_size(inst, basereg, disp, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), 0, 0, (basereg)); \
+               x86_64_opcode1_emit((inst), 0xf6, (size)); \
+               x86_64_membase_emit((inst), 0, (basereg), (disp)); \
+               x86_64_imm_emit_max32((inst), (imm), (size)); \
+       } while (0)
+
+#define x86_64_test_memindex_imm_size(inst, basereg, disp, indexreg, shift, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), 0, (indexreg), (basereg)); \
+               x86_64_opcode1_emit((inst), 0xf6, (size)); \
+               x86_64_memindex_emit((inst), 0, (basereg), (disp), (indexreg), (shift)); \
+               x86_64_imm_emit_max32((inst), (imm), (size)); \
+       } while (0)
+
+#define x86_64_test_reg_reg_size(inst, dreg, sreg, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (sreg), 0, (dreg)); \
+               x86_64_opcode1_emit((inst), 0x84, (size)); \
+               x86_64_reg_emit((inst), (sreg), (dreg)); \
+       } while (0)
+
+#define x86_64_test_regp_reg_size(inst, dregp, sreg, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (sreg), 0, (dregp)); \
+               x86_64_opcode1_emit((inst), 0x84, (size)); \
+               x86_64_regp_emit((inst), (sreg), (dregp)); \
+       } while (0)
+
+#define x86_64_test_mem_reg_size(inst, mem, sreg, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (sreg), 0, 0); \
+               x86_64_opcode1_emit((inst), 0x84, (size)); \
+               x86_64_mem_emit((inst), (sreg), (mem)); \
+       } while (0)
+
+#define x86_64_test_membase_reg_size(inst, basereg, disp, sreg, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (sreg), 0, (basereg)); \
+               x86_64_opcode1_emit((inst), 0x84, (size)); \
+               x86_64_membase_emit((inst), (sreg), (basereg), (disp)); \
+       } while (0)
+
+#define x86_64_test_memindex_reg_size(inst, basereg, disp, indexreg, shift, sreg, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (sreg), (indexreg), (basereg)); \
+               x86_64_opcode1_emit((inst), 0x84, (size)); \
+               x86_64_memindex_emit((inst), (sreg), (basereg), (disp), (indexreg), (shift)); \
+       } while (0)
+
+/*
+ * imul: signed multiply
+ */
+#define x86_64_imul_reg_reg_imm_size(inst, dreg, sreg, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (sreg)); \
+               if(x86_is_imm8((imm))) \
+               { \
+                       *(inst)++ = (unsigned char)0x6b; \
+                       x86_64_reg_emit((inst), (dreg), (sreg)); \
+                       x86_imm_emit8((inst), (imm)); \
+               } \
+               else \
+               { \
+                       *(inst)++ = (unsigned char)0x69; \
+                       x86_64_reg_emit((inst), (dreg), (sreg)); \
+                       switch((size)) \
+                       { \
+                               case 2: \
+                               { \
+                                       x86_imm_emit16(inst, (imm)); \
+                               } \
+                               break; \
+                               case 4: \
+                               case 8: \
+                               { \
+                                       x86_imm_emit32(inst, (imm)); \
+                               } \
+                               break; \
+                       } \
+               } \
+       } while(0)
+
+#define x86_64_imul_reg_regp_imm_size(inst, dreg, sregp, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (sregp)); \
+               if(x86_is_imm8((imm))) \
+               { \
+                       *(inst)++ = (unsigned char)0x6b; \
+                       x86_64_regp_emit((inst), (dreg), (sregp)); \
+                       x86_imm_emit8((inst), (imm)); \
+               } \
+               else \
+               { \
+                       *(inst)++ = (unsigned char)0x69; \
+                       x86_64_regp_emit((inst), (dreg), (sregp)); \
+                       switch((size)) \
+                       { \
+                               case 2: \
+                               { \
+                                       x86_imm_emit16(inst, (imm)); \
+                               } \
+                               break; \
+                               case 4: \
+                               case 8: \
+                               { \
+                                       x86_imm_emit32(inst, (imm)); \
+                               } \
+                               break; \
+                       } \
+               } \
+       } while(0)
+
+#define x86_64_imul_reg_mem_imm_size(inst, dreg, mem, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, 0); \
+               if(x86_is_imm8((imm))) \
+               { \
+                       *(inst)++ = (unsigned char)0x6b; \
+                       x86_64_mem_emit((inst), (dreg), (mem)); \
+                       x86_imm_emit8((inst), (imm)); \
+               } \
+               else \
+               { \
+                       *(inst)++ = (unsigned char)0x69; \
+                       x86_64_mem_emit((inst), (dreg), (mem)); \
+                       switch((size)) \
+                       { \
+                               case 2: \
+                               { \
+                                       x86_imm_emit16(inst, (imm)); \
+                               } \
+                               break; \
+                               case 4: \
+                               case 8: \
+                               { \
+                                       x86_imm_emit32(inst, (imm)); \
+                               } \
+                               break; \
+                       } \
+               } \
+       } while(0)
+
+#define x86_64_imul_reg_membase_imm_size(inst, dreg, basereg, disp, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (basereg)); \
+               if(x86_is_imm8((imm))) \
+               { \
+                       *(inst)++ = (unsigned char)0x6b; \
+                       x86_64_membase_emit((inst), (dreg), (basereg), (disp)); \
+                       x86_imm_emit8((inst), (imm)); \
+               } \
+               else \
+               { \
+                       *(inst)++ = (unsigned char)0x69; \
+                       x86_64_membase_emit((inst), (dreg), (basereg), (disp)); \
+                       switch((size)) \
+                       { \
+                               case 2: \
+                               { \
+                                       x86_imm_emit16(inst, (imm)); \
+                               } \
+                               break; \
+                               case 4: \
+                               case 8: \
+                               { \
+                                       x86_imm_emit32(inst, (imm)); \
+                               } \
+                               break; \
+                       } \
+               } \
+       } while(0)
+
+#define x86_64_imul_reg_memindex_imm_size(inst, dreg, basereg, disp, indexreg, shift, imm, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), (indexreg), (basereg)); \
+               if(x86_is_imm8((imm))) \
+               { \
+                       *(inst)++ = (unsigned char)0x6b; \
+                       x86_64_memindex_emit((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+                       x86_imm_emit8((inst), (imm)); \
+               } \
+               else \
+               { \
+                       *(inst)++ = (unsigned char)0x69; \
+                       x86_64_memindex_emit((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+                       switch((size)) \
+                       { \
+                               case 2: \
+                               { \
+                                       x86_imm_emit16(inst, (imm)); \
+                               } \
+                               break; \
+                               case 4: \
+                               case 8: \
+                               { \
+                                       x86_imm_emit32(inst, (imm)); \
+                               } \
+                               break; \
+                       } \
+               } \
+       } while(0)
+
+#define x86_64_imul_reg_reg_size(inst, dreg, sreg, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (sreg)); \
+               *(inst)++ = (unsigned char)0x0F; \
+               *(inst)++ = (unsigned char)0xAF; \
+               x86_64_reg_emit((inst), (dreg), (sreg)); \
+       } while(0)
+
+#define x86_64_imul_reg_regp_size(inst, dreg, sregp, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (sregp)); \
+               *(inst)++ = (unsigned char)0x0F; \
+               *(inst)++ = (unsigned char)0xAF; \
+               x86_64_regp_emit((inst), (dreg), (sregp)); \
+       } while(0)
+
+#define x86_64_imul_reg_mem_size(inst, dreg, mem, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, 0); \
+               *(inst)++ = (unsigned char)0x0F; \
+               *(inst)++ = (unsigned char)0xAF; \
+               x86_64_mem_emit((inst), (dreg), (mem)); \
+       } while(0)
+
+#define x86_64_imul_reg_membase_size(inst, dreg, basereg, disp, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (basereg)); \
+               *(inst)++ = (unsigned char)0x0F; \
+               *(inst)++ = (unsigned char)0xAF; \
+               x86_64_membase_emit((inst), (dreg), (basereg), (disp)); \
+       } while(0)
+
+#define x86_64_imul_reg_memindex_size(inst, dreg, basereg, disp, indexreg, shift, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), (indexreg), (basereg)); \
+               *(inst)++ = (unsigned char)0x0F; \
+               *(inst)++ = (unsigned char)0xAF; \
+               x86_64_memindex_emit((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+       } while(0)
+
+/*
+ * cwd, cdq, cqo: sign-extend ax into dx (used before idiv)
+ */
+#define x86_64_cwd(inst) \
+       do { \
+               *(inst)++ = (unsigned char)0x66; \
+               *(inst)++ = (unsigned char)0x99; \
+       } while(0)
+
+#define x86_64_cdq(inst) \
+       do { \
+               *(inst)++ = (unsigned char)0x99; \
+       } while(0)
+
+#define x86_64_cqo(inst) \
+       do { \
+               *(inst)++ = (unsigned char)0x48; \
+               *(inst)++ = (unsigned char)0x99; \
+       } while(0)
+
+/*
  * Lea instructions
  */
 #define x86_64_lea_mem_size(inst, dreg, mem, size) \
@@ -2631,6 +3008,104 @@
        }while(0)
 
 /*
+ * cmov: conditional move
+ */
+#define x86_64_cmov_reg_reg_size(inst, cond, dreg, sreg, is_signed, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (sreg)); \
+               *(inst)++ = (unsigned char)0x0f; \
+               if((is_signed)) \
+               { \
+                       *(inst)++ = x86_cc_signed_map[(cond)] - 0x30; \
+               } \
+               else \
+               { \
+                       *(inst)++ = x86_cc_unsigned_map[(cond)] - 0x30; \
+               } \
+               x86_64_reg_emit((inst), (dreg), (sreg)); \
+       } while (0)
+
+#define x86_64_cmov_reg_regp_size(inst, cond, dreg, sregp, is_signed, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (sregp)); \
+               *(inst)++ = (unsigned char)0x0f; \
+               if((is_signed)) \
+               { \
+                       *(inst)++ = x86_cc_signed_map[(cond)] - 0x30; \
+               } \
+               else \
+               { \
+                       *(inst)++ = x86_cc_unsigned_map[(cond)] - 0x30; \
+               } \
+               x86_64_regp_emit((inst), (dreg), (sregp)); \
+       } while (0)
+
+#define x86_64_cmov_reg_mem_size(inst, cond, dreg, mem, is_signed, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, 0); \
+               *(inst)++ = (unsigned char)0x0f; \
+               if((is_signed)) \
+               { \
+                       *(inst)++ = x86_cc_signed_map[(cond)] - 0x30; \
+               } \
+               else \
+               { \
+                       *(inst)++ = x86_cc_unsigned_map[(cond)] - 0x30; \
+               } \
+               x86_64_mem_emit((inst), (dreg), (mem)); \
+       } while (0)
+
+#define x86_64_cmov_reg_membase_size(inst, cond, dreg, basereg, disp, is_signed, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), 0, (basereg)); \
+               *(inst)++ = (unsigned char)0x0f; \
+               if((is_signed)) \
+               { \
+                       *(inst)++ = x86_cc_signed_map[(cond)] - 0x30; \
+               } \
+               else \
+               { \
+                       *(inst)++ = x86_cc_unsigned_map[(cond)] - 0x30; \
+               } \
+               x86_64_membase_emit((inst), (dreg), (basereg), (disp)); \
+       } while (0)
+
+#define x86_64_cmov_reg_memindex_size(inst, cond, dreg, basereg, disp, indexreg, shift, is_signed, size) \
+       do { \
+               if((size) == 2) \
+               { \
+                       *(inst)++ = (unsigned char)0x66; \
+               } \
+               x86_64_rex_emit((inst), (size), (dreg), (indexreg), (basereg)); \
+               *(inst)++ = (unsigned char)0x0f; \
+               if((is_signed)) \
+               { \
+                       *(inst)++ = x86_cc_signed_map[(cond)] - 0x30; \
+               } \
+               else \
+               { \
+                       *(inst)++ = x86_cc_unsigned_map[(cond)] - 0x30; \
+               } \
+               x86_64_memindex_emit((inst), (dreg), (basereg), (disp), (indexreg), (shift)); \
+       } while (0)
+
+/*
  * Stack manipulation instructions (push and pop)
  */
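
A note on how the new cwd/cdq/cqo macros are used: the signed divide rules in the jit-rules-x86-64.ins patch below emit one of them immediately before idiv so that dx/edx/rdx holds the sign extension of the dividend, while the unsigned rules clear rdx and use div instead. The following standalone sketch just prints the byte sequences involved; it is hand-encoded for the eax/ecx and rax/rcx case and does not use the libjit macros:

#include <stdio.h>

int main(void)
{
        /* cdq; idiv %ecx  -- signed 32-bit divide of edx:eax by ecx */
        static const unsigned char sdiv32[] = { 0x99, 0xf7, 0xf9 };
        /* cqo; idiv %rcx  -- signed 64-bit divide, REX.W (0x48) on both */
        static const unsigned char sdiv64[] = { 0x48, 0x99, 0x48, 0xf7, 0xf9 };
        /* xor %edx, %edx; div %ecx  -- the unsigned rules zero edx instead */
        static const unsigned char udiv32[] = { 0x31, 0xd2, 0xf7, 0xf1 };
        unsigned int i;

        for(i = 0; i < sizeof(sdiv32); ++i) printf("%02x ", sdiv32[i]);
        printf("\n");
        for(i = 0; i < sizeof(sdiv64); ++i) printf("%02x ", sdiv64[i]);
        printf("\n");
        for(i = 0; i < sizeof(udiv32); ++i) printf("%02x ", udiv32[i]);
        printf("\n");
        return 0;
}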
 

Index: jit/jit-rules-x86-64.ins
===================================================================
RCS file: /cvsroot/dotgnu-pnet/libjit/jit/jit-rules-x86-64.ins,v
retrieving revision 1.2
retrieving revision 1.3
diff -u -b -r1.2 -r1.3
--- jit/jit-rules-x86-64.ins    24 Mar 2008 12:42:51 -0000      1.2
+++ jit/jit-rules-x86-64.ins    30 Mar 2008 15:05:14 -0000      1.3
@@ -820,6 +820,217 @@
                x86_64_neg_reg_size(inst, $1, 4);
        }
 
+JIT_OP_IMUL: commutative
+       [reg, immzero] -> {
+               x86_64_clear_reg(inst, $1);
+       }
+       [reg, imm, if("$2 == -1")] -> {
+               x86_64_neg_reg_size(inst, $1, 4);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+       }
+       [reg, imm, if("$2 == 2")] -> {
+               x86_64_add_reg_reg_size(inst, $1, $1, 4);
+       }
+       [reg, imm, if("(((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               jit_nuint shift, value = $2 >> 1;
+               for(shift = 0; value; value >>= 1)
+               {
+                   ++shift;
+               }
+               x86_64_shl_reg_imm_size(inst, $1, shift, 4);
+       }
+       [reg, imm] -> {
+               x86_64_imul_reg_reg_imm_size(inst, $1, $1, $2, 4);
+       }
+       [reg, local] -> {
+               x86_64_imul_reg_membase_size(inst, $1, X86_64_RBP, $2, 4);
+       }
+       [reg, reg] -> {
+               x86_64_imul_reg_reg_size(inst, $1, $2, 4);
+       }
+
+JIT_OP_IDIV: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+       }
+       [reg, imm, if("$2 == -1")] -> {
+               /* Dividing by -1 gives an exception if the argument
+                  is minint, or simply negates for other values */
+               jit_int min_int = jit_min_int;
+               unsigned char *patch;
+               x86_64_cmp_reg_imm_size(inst, $1, min_int, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_64_neg_reg_size(inst, $1, 4);
+       }
+       [reg, imm, scratch reg, if("$2 == 2")] -> {
+               /* move the value to be divided to the temporary */
+               x86_64_mov_reg_reg_size(inst, $3, $1, 4);
+               /* shift the temporary 31 bits to the right */
+               /* The result is 1 for negative values and 0 for zero or */
+               /* positive values. (corrective value for negatives) */
+               x86_64_shr_reg_imm_size(inst, $3, 0x1f, 4);
+               /* Add the corrective value to the dividend */
+               x86_64_add_reg_reg_size(inst, $1, $3, 4);
+               /* and do the right shift */
+               x86_64_sar_reg_imm_size(inst, $1, 1, 4);
+       }
+       [reg, imm, scratch reg, if("($2 > 0) && (((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               jit_nuint shift, corr, value = $2 >> 1;
+               for(shift = 0; value; value >>= 1)
+               {
+                   ++shift;
+               }
+               corr = $2 - 1;
+               x86_64_lea_membase_size(inst, $3, $1, corr, 4);
+               x86_64_test_reg_reg_size(inst, $1, $1, 4);
+               x86_64_cmov_reg_reg_size(inst, X86_CC_S, $1, $3, 1, 4);
+               x86_64_sar_reg_imm_size(inst, $1, shift, 4);
+       }
+       [reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $3, $2, 4);
+               x86_64_cdq(inst);
+               x86_64_idiv_reg_size(inst, $3, 4);
+       }
+       [reg("rax"), reg, scratch reg("rdx")] -> {
+               jit_int min_int = jit_min_int;
+               unsigned char *patch, *patch2;
+#ifndef JIT_USE_SIGNALS
+               x86_64_test_reg_reg_size(inst, $2, $2, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_cmp_reg_imm_size(inst, $2, -1, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               x86_64_cmp_reg_imm_size(inst, $1, min_int, 4);
+               patch2 = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_patch(patch2, inst);
+               x86_64_cdq(inst);
+               x86_64_idiv_reg_size(inst, $2, 4);
+       }
+
+JIT_OP_IDIV_UN: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+       }
+       [reg, imm, if("(((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               jit_nuint shift, value = $2 >> 1;
+               for(shift = 0; value; value >>= 1)
+               {
+                   ++shift;
+               }
+               x86_64_shr_reg_imm_size(inst, $1, shift, 4);
+       }
+       [reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $3, $2, 4);
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $3, 4);
+       }
+       [reg("rax"), reg, scratch reg("rdx")] -> {
+#ifndef JIT_USE_SIGNALS
+               unsigned char *patch;
+               x86_64_test_reg_reg_size(inst, $2, $2, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $2, 4);
+       }
+
+JIT_OP_IREM: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+               x86_64_clear_reg(inst, $1);
+       }
+       [reg, imm, if("$2 == -1")] -> {
+               /* Dividing by -1 gives an exception if the argument
+                  is minint, or simply gives a remainder of zero */
+               jit_int min_int = jit_min_int;
+               unsigned char *patch;
+               x86_64_cmp_reg_imm_size(inst, $1, min_int, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_64_clear_reg(inst, $1);
+       }
+       [=reg("rdx"), *reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $4, $3, 4);
+               x86_64_cdq(inst);
+               x86_64_idiv_reg_size(inst, $4, 4);
+       }
+       [=reg("rdx"), *reg("rax"), reg, scratch reg("rdx")] -> {
+               jit_int min_int = jit_min_int;
+               unsigned char *patch, *patch2;
+#ifndef JIT_USE_SIGNALS
+               x86_64_test_reg_reg_size(inst, $3, $3, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_cmp_reg_imm_size(inst, $3, -1, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               x86_64_cmp_reg_imm_size(inst, $2, min_int, 4);
+               patch2 = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_patch(patch2, inst);
+               x86_64_cdq(inst);
+               x86_64_idiv_reg_size(inst, $3, 4);
+       }
+
+JIT_OP_IREM_UN: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+               x86_64_clear_reg(inst, $1);
+       }
+       [reg, imm, if("(((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               x86_64_and_reg_imm_size(inst, $1, $2 - 1, 4);
+       }
+       [=reg("rdx"), *reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $4, $3, 4);
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $4, 4);
+       }
+       [=reg("rdx"), *reg("rax"), reg, scratch reg("rdx")] -> {
+#ifndef JIT_USE_SIGNALS
+               unsigned char *patch;
+               x86_64_test_reg_reg_size(inst, $3, $3, 4);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $3, 4);
+       }
+
 /*
  * 8 byte integer versions
  */
@@ -864,6 +1075,250 @@
        [reg] -> {
                x86_64_neg_reg_size(inst, $1, 8);
        }
+
+JIT_OP_LMUL: commutative
+       [reg, immzero] -> {
+               x86_64_clear_reg(inst, $1);
+       }
+       [reg, imm, if("$2 == -1")] -> {
+               x86_64_neg_reg_size(inst, $1, 8);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+       }
+       [reg, imm, if("$2 == 2")] -> {
+               x86_64_add_reg_reg_size(inst, $1, $1, 8);
+       }
+       [reg, imm, if("(((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               jit_nuint shift, value = $2 >> 1;
+               for(shift = 0; value; value >>= 1)
+               {
+                   ++shift;
+               }
+               x86_64_shl_reg_imm_size(inst, $1, shift, 8);
+       }
+       [reg, imm, if("($2 >= (jit_nint)jit_min_int) && ($2 <= (jit_nint)jit_max_int)")] -> {
+               x86_64_imul_reg_reg_imm_size(inst, $1, $1, $2, 8);
+       }
+       [reg, local] -> {
+               x86_64_imul_reg_membase_size(inst, $1, X86_64_RBP, $2, 8);
+       }
+       [reg, reg] -> {
+               x86_64_imul_reg_reg_size(inst, $1, $2, 8);
+       }
+
+JIT_OP_LDIV: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+       }
+       [reg, imm, scratch reg, if("$2 == -1")] -> {
+               /* Dividing by -1 gives an exception if the argument
+                  is minint, or simply negates for other values */
+               jit_long min_long = jit_min_long;
+               unsigned char *patch;
+               x86_64_mov_reg_imm_size(inst, $3, min_long, 8);
+               x86_64_cmp_reg_reg_size(inst, $1, $3, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_64_neg_reg_size(inst, $1, 8);
+       }
+       [reg, imm, scratch reg, if("$2 == 2")] -> {
+               /* move the value to be divided to the temporary */
+               x86_64_mov_reg_reg_size(inst, $3, $1, 8);
+               /* shift the temporary 63 bits to the right */
+               /* The result is 1 for negative values and 0 for zero or */
+               /* positive values. (corrective value for negatives) */
+               x86_64_shr_reg_imm_size(inst, $3, 0x3f, 8);
+               /* Add the corrective value to the dividend */
+               x86_64_add_reg_reg_size(inst, $1, $3, 8);
+               /* and do the right shift */
+               x86_64_sar_reg_imm_size(inst, $1, 1, 8);
+       }
+       [reg, imm, scratch reg, if("($2 > 0) && (((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               jit_nuint shift, value = $2 >> 1;
+               for(shift = 0; value; value >>= 1)
+               {
+                   ++shift;
+               }
+               if((jit_nuint)$2 <= (jit_nuint)jit_max_uint)
+               {
+                       jit_nuint corr = ($2 - 1);
+
+                       x86_64_lea_membase_size(inst, $3, $1, corr, 8);
+                       x86_64_test_reg_reg_size(inst, $1, $1, 8);
+               }
+               else
+               {
+                       jit_nuint corr = ($2 - 1);
+
+                       if(corr <= (jit_nuint)jit_max_uint)
+                       {
+                               x86_64_mov_reg_imm_size(inst, $3, corr, 4);
+                       }
+                       else
+                       {
+                               x86_64_mov_reg_imm_size(inst, $3, corr, 8);
+                       }
+                       x86_64_test_reg_reg_size(inst, $1, $1, 8);
+                       x86_64_lea_memindex_size(inst, $3, $1, 0, $3, 0, 8);
+               }
+               x86_64_cmov_reg_reg_size(inst, X86_CC_S, $1, $3, 1, 8);
+               x86_64_sar_reg_imm_size(inst, $1, shift, 8);
+       }
+       [reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $3, $2, 8);
+               x86_64_cqo(inst);
+               x86_64_idiv_reg_size(inst, $3, 8);
+       }
+       [reg("rax"), reg, scratch reg("rdx")] -> {
+               jit_long min_long = jit_min_long;
+               unsigned char *patch, *patch2;
+#ifndef JIT_USE_SIGNALS
+               x86_64_or_reg_reg_size(inst, $2, $2, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_cmp_reg_imm_size(inst, $2, -1, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               x86_64_mov_reg_imm_size(inst, $3, min_long, 8);
+               x86_64_cmp_reg_reg_size(inst, $1, $3, 8);
+               patch2 = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_patch(patch2, inst);
+               x86_64_cqo(inst);
+               x86_64_idiv_reg_size(inst, $2, 8);
+       }
+
+JIT_OP_LDIV_UN: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+       }
+       [reg, imm, if("(((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               jit_nuint shift, value = $2 >> 1;
+               for(shift = 0; value; value >>= 1)
+               {
+                   ++shift;
+               }
+               x86_64_shr_reg_imm_size(inst, $1, shift, 8);
+       }
+       [reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $3, $2, 8);
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $3, 8);
+       }
+       [reg("rax"), reg, scratch reg("rdx")] -> {
+#ifndef JIT_USE_SIGNALS
+               unsigned char *patch;
+               x86_64_test_reg_reg_size(inst, $2, $2, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $2, 8);
+       }
+
+JIT_OP_LREM: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+               x86_64_clear_reg(inst, $1);
+       }
+       [reg, imm, if("$2 == -1")] -> {
+               /* Dividing by -1 gives an exception if the argument
+                  is minint, or simply gives a remainder of zero */
+               jit_long min_long = jit_min_long;
+               unsigned char *patch;
+               x86_64_cmp_reg_imm_size(inst, $1, min_long, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_64_clear_reg(inst, $1);
+       }
+       [=reg("rdx"), *reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $4, $3, 8);
+               x86_64_cqo(inst);
+               x86_64_idiv_reg_size(inst, $4, 8);
+       }
+       [=reg("rdx"), *reg("rax"), reg, scratch reg("rdx")] -> {
+               jit_long min_long = jit_min_long;
+               unsigned char *patch, *patch2;
+#ifndef JIT_USE_SIGNALS
+               x86_64_test_reg_reg_size(inst, $3, $3, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_mov_reg_imm_size(inst, $1, min_long, 8);
+               x86_64_cmp_reg_imm_size(inst, $3, -1, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               x86_64_cmp_reg_reg_size(inst, $2, $1, 8);
+               patch2 = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_ARITHMETIC);
+               x86_patch(patch, inst);
+               x86_patch(patch2, inst);
+               x86_64_cqo(inst);
+               x86_64_idiv_reg_size(inst, $3, 8);
+       }
+
+JIT_OP_LREM_UN: more_space
+       [any, immzero] -> {
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+       }
+       [reg, imm, if("$2 == 1")] -> {
+               x86_64_clear_reg(inst, $1);
+       }
+       [reg, imm, scratch reg, if("(((jit_nuint)$2) & (((jit_nuint)$2) - 1)) == 0")] -> {
+               /* x & (x - 1) is equal to zero if x is a power of 2  */
+               if(($2 >= jit_min_int) && ($2 <= jit_max_int))
+               {
+                       x86_64_and_reg_imm_size(inst, $1, $2 - 1, 8);
+               }
+               else
+               {
+                       jit_long temp = $2 - 1;
+
+                       x86_64_mov_reg_imm_size(inst, $3, temp, 8);
+                       x86_64_and_reg_reg_size(inst, $1, $3, 8);
+               }
+       }
+       [=reg("rdx"), *reg("rax"), imm, scratch reg, scratch reg("rdx")] -> {
+               x86_64_mov_reg_imm_size(inst, $4, $3, 8);
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $4, 8);
+       }
+       [=reg("rdx"), *reg("rax"), reg, scratch reg("rdx")] -> {
+#ifndef JIT_USE_SIGNALS
+               unsigned char *patch;
+               x86_64_test_reg_reg_size(inst, $3, $3, 8);
+               patch = inst;
+               x86_branch8(inst, X86_CC_NE, 0, 0);
+               inst = throw_builtin(inst, func, JIT_RESULT_DIVISION_BY_ZERO);
+               x86_patch(patch, inst);
+#endif
+               x86_64_clear_reg(inst, X86_64_RDX);
+               x86_64_div_reg_size(inst, $3, 8);
+       }
+
 /*
  * single precision float versions
  */
@@ -1099,19 +1554,19 @@
 
 JIT_OP_BR_IFALSE: branch
        [reg] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 4);
+               x86_64_test_reg_reg_size(inst, $1, $1, 4);
                inst = output_branch(func, inst, 0x74 /* eq */, insn);
        }
 
 JIT_OP_BR_ITRUE: branch
        [reg] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 4);
+               x86_64_test_reg_reg_size(inst, $1, $1, 4);
                inst = output_branch(func, inst, 0x75 /* ne */, insn);
        }
 
 JIT_OP_BR_IEQ: branch
        [reg, immzero] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 4);
+               x86_64_test_reg_reg_size(inst, $1, $1, 4);
                inst = output_branch(func, inst, 0x74 /* eq */, insn);
        }
        [reg, imm] -> {
@@ -1129,7 +1584,7 @@
 
 JIT_OP_BR_INE: branch
        [reg, immzero] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 4);
+               x86_64_test_reg_reg_size(inst, $1, $1, 4);
                inst = output_branch(func, inst, 0x75 /* ne */, insn);
        }
        [reg, imm] -> {
@@ -1259,19 +1714,19 @@
 
 JIT_OP_BR_LFALSE: branch
        [reg] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 8);
+               x86_64_test_reg_reg_size(inst, $1, $1, 8);
                inst = output_branch(func, inst, 0x74 /* eq */, insn);
        }
 
 JIT_OP_BR_LTRUE: branch
        [reg] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 8);
+               x86_64_test_reg_reg_size(inst, $1, $1, 8);
                inst = output_branch(func, inst, 0x75 /* ne */, insn);
        }
 
 JIT_OP_BR_LEQ: branch
        [reg, immzero] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 8);
+               x86_64_test_reg_reg_size(inst, $1, $1, 8);
                inst = output_branch(func, inst, 0x74 /* eq */, insn);
        }
        [reg, imm, if("($2 >= (jit_nint)jit_min_int && $2 <= (jit_nint)jit_max_int)")] -> {
@@ -1289,7 +1744,7 @@
 
 JIT_OP_BR_LNE: branch
        [reg, immzero] -> {
-               x86_64_or_reg_reg_size(inst, $1, $1, 8);
+               x86_64_test_reg_reg_size(inst, $1, $1, 8);
                inst = output_branch(func, inst, 0x75 /* ne */, insn);
        }
        [reg, imm, if("($2 >= (jit_nint)jit_min_int && $2 <= (jit_nint)jit_max_int)")] -> {
@@ -1729,7 +2184,7 @@
                x86_64_cmp_reg_membase_size(inst, $1, $1, 0, 8);
 #else
                unsigned char *patch;
-               x86_64_or_reg_reg_size(inst, $1, $1, 8);
+               x86_64_test_reg_reg_size(inst, $1, $1, 8);
                patch = inst;
                x86_branch8(inst, X86_CC_NE, 0, 0);
                inst = throw_builtin(inst, func, JIT_RESULT_NULL_REFERENCE);



