diff options
Diffstat (limited to 'thirdparty/pcre2/src/sljit/sljitNativeX86_common.c')
-rw-r--r-- | thirdparty/pcre2/src/sljit/sljitNativeX86_common.c | 792 |
1 file changed, 319 insertions, 473 deletions
diff --git a/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c b/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
index 12a0e272af..ab7b36adb2 100644
--- a/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
+++ b/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
@@ -1,7 +1,7 @@
 /*
  *    Stack-less Just-In-Time compiler
  *
- *    Copyright 2009-2012 Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
+ *    Copyright Zoltan Herczeg (hzmester@freemail.hu). All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without modification, are
  * permitted provided that the following conditions are met:
@@ -26,7 +26,11 @@
 
 SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
 {
+#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
+	return "x86" SLJIT_CPUINFO " ABI:fastcall";
+#else
 	return "x86" SLJIT_CPUINFO;
+#endif
 }
 
 /*
@@ -35,7 +39,7 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
    1 - ECX
    2 - EDX
    3 - EBX
-   4 - none
+   4 - ESP
   5 - EBP
   6 - ESI
   7 - EDI
@@ -47,7 +51,7 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
   1 - RCX
   2 - RDX
   3 - RBX
-   4 - none
+   4 - RSP
   5 - RBP
   6 - RSI
   7 - RDI
@@ -67,12 +71,15 @@ SLJIT_API_FUNC_ATTRIBUTE const char* sljit_get_platform_name(void)
 #define TMP_REG1	(SLJIT_NUMBER_OF_REGISTERS + 2)
 
 static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
-	0, 0, 2, 1, 0, 0, 0, 0, 7, 6, 3, 4, 5
+	0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 7, 6, 3, 4, 5
 };
 
 #define CHECK_EXTRA_REGS(p, w, do) \
-	if (p >= SLJIT_R3 && p <= SLJIT_R6) { \
-		w = SLJIT_LOCALS_OFFSET + ((p) - (SLJIT_R3 + 4)) * sizeof(sljit_sw); \
+	if (p >= SLJIT_R3 && p <= SLJIT_S3) { \
+		if (p <= compiler->scratches) \
+			w = compiler->saveds_offset - ((p) - SLJIT_R2) * (sljit_sw)sizeof(sljit_sw); \
+		else \
+			w = compiler->locals_offset + ((p) - SLJIT_S2) * (sljit_sw)sizeof(sljit_sw); \
 		p = SLJIT_MEM1(SLJIT_SP); \
 		do; \
 	}
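Editor's note on the new CHECK_EXTRA_REGS above: on x86-32 the virtual registers SLJIT_R3..SLJIT_S3 have no hardware register (their reg_map entries are 0), so the macro rewrites them into [esp + w] operands — a spilled scratch register resolves against saveds_offset, a spilled saved register against locals_offset. A minimal C sketch of that arithmetic; the encodings, offsets, and helper name below are illustrative assumptions, not sljit definitions:

    #include <stdio.h>

    /* Illustrative virtual-register encodings; not sljit's actual values. */
    #define VR_R2 3
    #define VR_S2 10

    static long extra_reg_offset(int p, int scratches, long saveds_offset, long locals_offset)
    {
        if (p <= scratches)  /* spilled scratch register: saveds area, indexed downwards */
            return saveds_offset - (p - VR_R2) * (long)sizeof(long);
        return locals_offset + (p - VR_S2) * (long)sizeof(long);  /* spilled saved register */
    }

    int main(void) {
        printf("%ld\n", extra_reg_offset(4, 5, 64, 96));  /* R3 with 5 scratches -> 56 */
        return 0;
    }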
@@ -82,31 +89,39 @@ static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 3] = {
 /* Last register + 1. */
 #define TMP_REG1	(SLJIT_NUMBER_OF_REGISTERS + 2)
 #define TMP_REG2	(SLJIT_NUMBER_OF_REGISTERS + 3)
-#define TMP_REG3	(SLJIT_NUMBER_OF_REGISTERS + 4)
 
 /* Note: r12 & 0x7 == 0b100, which is decoded as SIB byte present
    Note: avoid using r12 and r13 for memory addressing
-   therefore r12 is better for SAVED_EREG than SAVED_REG. */
+   therefore r12 is better to be a higher saved register. */
 #ifndef _WIN64
-/* 1st passed in rdi, 2nd argument passed in rsi, 3rd in rdx. */
-static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
-	0, 0, 6, 1, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 7, 9
+/* Args: rdi(=7), rsi(=6), rdx(=2), rcx(=1), r8, r9. Scratches: rax(=0), r10, r11 */
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+	0, 0, 6, 7, 1, 8, 11, 10, 12, 5, 13, 14, 15, 3, 4, 2, 9
 };
 /* low-map. reg_map & 0x7. */
-static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 5] = {
-	0, 0, 6, 1, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 7, 1
+static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+	0, 0, 6, 7, 1, 0, 3, 2, 4, 5, 5, 6, 7, 3, 4, 2, 1
 };
 #else
-/* 1st passed in rcx, 2nd argument passed in rdx, 3rd in r8. */
-static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 5] = {
-	0, 0, 2, 1, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 10, 8, 9
+/* Args: rcx(=1), rdx(=2), r8, r9. Scratches: rax(=0), r10, r11 */
+static const sljit_u8 reg_map[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+	0, 0, 2, 8, 1, 11, 12, 5, 13, 14, 15, 7, 6, 3, 4, 9, 10
 };
 /* low-map. reg_map & 0x7. */
-static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 5] = {
-	0, 0, 2, 1, 3, 4, 5, 5, 6, 7, 7, 6, 3, 4, 2, 0, 1
+static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 4] = {
+	0, 0, 2, 0, 1, 3, 4, 5, 5, 6, 7, 7, 6, 3, 4, 1, 2
 };
 #endif
 
+/* Args: xmm0-xmm3 */
+static const sljit_u8 freg_map[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
+	4, 0, 1, 2, 3, 5, 6
+};
+
+/* low-map. freg_map & 0x7. */
+static const sljit_u8 freg_lmap[SLJIT_NUMBER_OF_FLOAT_REGISTERS + 1] = {
+	4, 0, 1, 2, 3, 5, 6
+};
+
 #define REX_W		0x48
 #define REX_R		0x44
 #define REX_X		0x42
@@ -166,7 +181,7 @@ static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 5] = {
 #define CALL_i32	0xe8
 #define CALL_rm		(/* GROUP_FF */ 2 << 3)
 #define CDQ		0x99
-#define CMOVNE_r_rm	(/* GROUP_0F */ 0x45)
+#define CMOVE_r_rm	(/* GROUP_0F */ 0x44)
 #define CMP		(/* BINARY */ 7 << 3)
 #define CMP_EAX_i32	0x3d
 #define CMP_r_rm	0x3b
@@ -176,6 +191,8 @@ static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 5] = {
 #define CVTTSD2SI_r_xm	0x2c
 #define DIV		(/* GROUP_F7 */ 6 << 3)
 #define DIVSD_x_xm	0x5e
+#define FSTPS		0xd9
+#define FSTPD		0xdd
 #define INT3		0xcc
 #define IDIV		(/* GROUP_F7 */ 7 << 3)
 #define IMUL		(/* GROUP_F7 */ 5 << 3)
@@ -214,6 +231,7 @@ static const sljit_u8 reg_lmap[SLJIT_NUMBER_OF_REGISTERS + 5] = {
 #define POP_r		0x58
 #define POP_rm		0x8f
 #define POPF		0x9d
+#define PREFETCH	0x18
 #define PUSH_i32	0x68
 #define PUSH_r		0x50
 #define PUSH_rm		(/* GROUP_FF */ 6 << 3)
@@ -459,11 +477,7 @@ static sljit_u8* generate_near_jump_code(struct sljit_jump *jump, sljit_u8 *code
 		code_ptr += sizeof(sljit_s8);
 	} else {
 		jump->flags |= PATCH_MW;
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-		code_ptr += sizeof(sljit_sw);
-#else
 		code_ptr += sizeof(sljit_s32);
-#endif
 	}
 	return code_ptr;
@@ -585,18 +599,59 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compiler)
 	return (void*)(code + executable_offset);
 }
 
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
+{
+	switch (feature_type) {
+	case SLJIT_HAS_FPU:
+#ifdef SLJIT_IS_FPU_AVAILABLE
+		return SLJIT_IS_FPU_AVAILABLE;
+#elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
+		if (cpu_has_sse2 == -1)
+			get_cpu_features();
+		return cpu_has_sse2;
+#else /* SLJIT_DETECT_SSE2 */
+		return 1;
+#endif /* SLJIT_DETECT_SSE2 */
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+	case SLJIT_HAS_VIRTUAL_REGISTERS:
+		return 1;
+#endif
+
+	case SLJIT_HAS_CLZ:
+	case SLJIT_HAS_CMOV:
+		if (cpu_has_cmov == -1)
+			get_cpu_features();
+		return cpu_has_cmov;
+
+	case SLJIT_HAS_SSE2:
+#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
+		if (cpu_has_sse2 == -1)
+			get_cpu_features();
+		return cpu_has_sse2;
+#else
+		return 1;
+#endif
+
+	default:
+		return 0;
+	}
}
 
 /* --------------------------------------------------------------------- */
 /*  Operators                                                            */
 /* --------------------------------------------------------------------- */
 
+#define BINARY_OPCODE(opcode) (((opcode ## _EAX_i32) << 24) | ((opcode ## _r_rm) << 16) | ((opcode ## _rm_r) << 8) | (opcode))
+
 static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
-	sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+	sljit_u32 op_types,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w);
 
 static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
-	sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+	sljit_u32 op_types,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w);
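Editor's note: the new BINARY_OPCODE macro packs the four x86 encodings of an ALU operation (accumulator-immediate form, reg<-r/m form, r/m<-reg form, and the /digit for the immediate group) into one sljit_u32, so the binary emitters now take a single op_types argument. A compilable sketch using ADD's real opcode values from this file:

    #include <stdint.h>
    #include <assert.h>

    #define ADD         (0 << 3)   /* /0 in the 0x81 immediate group */
    #define ADD_EAX_i32 0x05
    #define ADD_r_rm    0x03
    #define ADD_rm_r    0x01
    #define BINARY_OPCODE(op) (((op##_EAX_i32) << 24) | ((op##_r_rm) << 16) | ((op##_rm_r) << 8) | (op))

    int main(void) {
        uint32_t op_types = BINARY_OPCODE(ADD);
        /* the unpack mirrors what emit_cum_binary now does internally */
        assert((uint8_t)(op_types >> 24) == ADD_EAX_i32);
        assert((uint8_t)(op_types >> 16) == ADD_r_rm);
        assert((uint8_t)(op_types >> 8) == ADD_rm_r);
        assert((uint8_t)op_types == ADD);
        return 0;
    }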
@@ -605,57 +660,19 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src, sljit_sw srcw);
 
-static SLJIT_INLINE sljit_s32 emit_save_flags(struct sljit_compiler *compiler)
-{
-	sljit_u8 *inst;
-
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
-	FAIL_IF(!inst);
-	INC_SIZE(5);
-#else
-	inst = (sljit_u8*)ensure_buf(compiler, 1 + 6);
-	FAIL_IF(!inst);
-	INC_SIZE(6);
-	*inst++ = REX_W;
-#endif
-	*inst++ = LEA_r_m; /* lea esp/rsp, [esp/rsp + sizeof(sljit_sw)] */
-	*inst++ = 0x64;
-	*inst++ = 0x24;
-	*inst++ = (sljit_u8)sizeof(sljit_sw);
-	*inst++ = PUSHF;
-	compiler->flags_saved = 1;
-	return SLJIT_SUCCESS;
-}
+#define EMIT_MOV(compiler, dst, dstw, src, srcw) \
+	FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
 
-static SLJIT_INLINE sljit_s32 emit_restore_flags(struct sljit_compiler *compiler, sljit_s32 keep_flags)
-{
-	sljit_u8 *inst;
+static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
+	sljit_s32 single, sljit_s32 dst, sljit_sw dstw, sljit_s32 src);
 
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
-	FAIL_IF(!inst);
-	INC_SIZE(5);
-	*inst++ = POPF;
-#else
-	inst = (sljit_u8*)ensure_buf(compiler, 1 + 6);
-	FAIL_IF(!inst);
-	INC_SIZE(6);
-	*inst++ = POPF;
-	*inst++ = REX_W;
-#endif
-	*inst++ = LEA_r_m; /* lea esp/rsp, [esp/rsp - sizeof(sljit_sw)] */
-	*inst++ = 0x64;
-	*inst++ = 0x24;
-	*inst++ = (sljit_u8)(-(sljit_s8)sizeof(sljit_sw));
-	compiler->flags_saved = keep_flags;
-	return SLJIT_SUCCESS;
-}
+static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler,
+	sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw);
 
 #ifdef _WIN32
 #include <malloc.h>
 
-static void SLJIT_CALL sljit_grow_stack(sljit_sw local_size)
+static void SLJIT_FUNC sljit_grow_stack(sljit_sw local_size)
 {
 	/* Workaround for calling the internal _chkstk() function on Windows.
 	This function touches all 4k pages belonging to the requested stack space,
@@ -681,15 +698,8 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
 {
 	sljit_u8* inst;
 
-	if (dst == SLJIT_UNUSED) {
-		/* No destination, doesn't need to setup flags. */
-		if (src & SLJIT_MEM) {
-			inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src, srcw);
-			FAIL_IF(!inst);
-			*inst = MOV_r_rm;
-		}
-		return SLJIT_SUCCESS;
-	}
+	SLJIT_ASSERT(dst != SLJIT_UNUSED);
+
 	if (FAST_IS_REG(src)) {
 		inst = emit_x86_instruction(compiler, 1, src, 0, dst, dstw);
 		FAIL_IF(!inst);
@@ -711,8 +721,10 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
 	}
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 	if (!compiler->mode32 && NOT_HALFWORD(srcw)) {
-		FAIL_IF(emit_load_imm64(compiler, TMP_REG2, srcw));
-		inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, dst, dstw);
+		/* Immediate to memory move. Only SLJIT_MOV operation copies
+		   an immediate directly into memory so TMP_REG1 can be used. */
+		FAIL_IF(emit_load_imm64(compiler, TMP_REG1, srcw));
+		inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, dst, dstw);
 		FAIL_IF(!inst);
 		*inst = MOV_rm_r;
 		return SLJIT_SUCCESS;
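Editor's note: the TMP_REG2-to-TMP_REG1 change above sits on the x86-64 path where a 64-bit immediate is stored to memory. MOV to r/m64 only encodes a sign-extended 32-bit immediate, so anything wider must be materialized in a register first. A small sketch of the test NOT_HALFWORD implies (the helper name is mine):

    #include <assert.h>
    #include <stdint.h>

    static int is_halfword(int64_t imm)  /* sljit's NOT_HALFWORD() is the negation */
    {
        return imm == (int64_t)(int32_t)imm;
    }

    int main(void) {
        assert(is_halfword(-1));                 /* fits: mov [mem], imm32 works */
        assert(!is_halfword((int64_t)1 << 32));  /* needs mov reg, imm64 first */
        return 0;
    }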
@@ -730,7 +742,8 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
 		return SLJIT_SUCCESS;
 	}
 
-	/* Memory to memory move. Requires two instruction. */
+	/* Memory to memory move. Only SLJIT_MOV operation copies
+	   data from memory to memory so TMP_REG1 can be used. */
 	inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src, srcw);
 	FAIL_IF(!inst);
 	*inst = MOV_r_rm;
@@ -740,9 +753,6 @@ static sljit_s32 emit_mov(struct sljit_compiler *compiler,
 	return SLJIT_SUCCESS;
 }
 
-#define EMIT_MOV(compiler, dst, dstw, src, srcw) \
-	FAIL_IF(emit_mov(compiler, dst, dstw, src, srcw));
-
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
 {
 	sljit_u8 *inst;
@@ -772,20 +782,17 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compiler, sljit_s32 op)
 	case SLJIT_DIVMOD_SW:
 	case SLJIT_DIV_UW:
 	case SLJIT_DIV_SW:
-		compiler->flags_saved = 0;
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 #ifdef _WIN64
-		SLJIT_COMPILE_ASSERT(
+		SLJIT_ASSERT(
 			reg_map[SLJIT_R0] == 0
 			&& reg_map[SLJIT_R1] == 2
-			&& reg_map[TMP_REG1] > 7,
-			invalid_register_assignment_for_div_mul);
+			&& reg_map[TMP_REG1] > 7);
 #else
-		SLJIT_COMPILE_ASSERT(
+		SLJIT_ASSERT(
 			reg_map[SLJIT_R0] == 0
 			&& reg_map[SLJIT_R1] < 7
-			&& reg_map[TMP_REG1] == 2,
-			invalid_register_assignment_for_div_mul);
+			&& reg_map[TMP_REG1] == 2);
 #endif
 		compiler->mode32 = op & SLJIT_I32_OP;
 #endif
@@ -909,9 +916,6 @@ static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
 	compiler->mode32 = 0;
 #endif
 
-	if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM))
-		return SLJIT_SUCCESS; /* Empty instruction. */
-
 	if (src & SLJIT_IMM) {
 		if (FAST_IS_REG(dst)) {
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
@@ -1040,6 +1044,30 @@ static sljit_s32 emit_mov_byte(struct sljit_compiler *compiler, sljit_s32 sign,
 	return SLJIT_SUCCESS;
 }
 
+static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 src, sljit_sw srcw)
+{
+	sljit_u8* inst;
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	compiler->mode32 = 1;
+#endif
+
+	inst = emit_x86_instruction(compiler, 2, 0, 0, src, srcw);
+	FAIL_IF(!inst);
+	*inst++ = GROUP_0F;
+	*inst++ = PREFETCH;
+
+	if (op >= SLJIT_MOV_U8 && op <= SLJIT_MOV_S8)
+		*inst |= (3 << 3);
+	else if (op >= SLJIT_MOV_U16 && op <= SLJIT_MOV_S16)
+		*inst |= (2 << 3);
+	else
+		*inst |= (1 << 3);
+
+	return SLJIT_SUCCESS;
+}
+
 static sljit_s32 emit_mov_half(struct sljit_compiler *compiler, sljit_s32 sign,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src, sljit_sw srcw)
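Editor's note: emit_prefetch above emits opcode 0F 18 /r, where the ModRM reg field selects the locality hint; the *inst |= (n << 3) lines set exactly that field (/1 = prefetcht0, /2 = prefetcht1, /3 = prefetcht2, /0 would be prefetchnta). A sketch of the mapping (the helper name is mine):

    #include <stdio.h>
    #include <stdint.h>

    /* ModRM reg-field hint, mirroring emit_prefetch's (n << 3) values. */
    static uint8_t prefetch_hint_bits(int is_byte_mov, int is_half_mov)
    {
        if (is_byte_mov)
            return 3 << 3;  /* prefetcht2: farthest cache level */
        if (is_half_mov)
            return 2 << 3;  /* prefetcht1 */
        return 1 << 3;      /* prefetcht0: all cache levels */
    }

    int main(void) {
        printf("%u\n", prefetch_hint_bits(0, 0));  /* prints 8, i.e. /1 */
        return 0;
    }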
@@ -1051,9 +1079,6 @@ static sljit_s32 emit_mov_half(struct sljit_compiler *compiler, sljit_s32 sign,
 	compiler->mode32 = 0;
 #endif
 
-	if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM))
-		return SLJIT_SUCCESS; /* Empty instruction. */
-
 	if (src & SLJIT_IMM) {
 		if (FAST_IS_REG(dst)) {
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
@@ -1097,14 +1122,6 @@ static sljit_s32 emit_unary(struct sljit_compiler *compiler, sljit_u8 opcode,
 {
 	sljit_u8* inst;
 
-	if (dst == SLJIT_UNUSED) {
-		EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
-		inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
-		FAIL_IF(!inst);
-		*inst++ = GROUP_F7;
-		*inst |= opcode;
-		return SLJIT_SUCCESS;
-	}
 	if (dst == src && dstw == srcw) {
 		/* Same input and output */
 		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
@@ -1113,14 +1130,19 @@ static sljit_s32 emit_unary(struct sljit_compiler *compiler, sljit_u8 opcode,
 		*inst |= opcode;
 		return SLJIT_SUCCESS;
 	}
+
+	if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED))
+		dst = TMP_REG1;
+
 	if (FAST_IS_REG(dst)) {
 		EMIT_MOV(compiler, dst, 0, src, srcw);
-		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
+		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
 		FAIL_IF(!inst);
 		*inst++ = GROUP_F7;
 		*inst |= opcode;
 		return SLJIT_SUCCESS;
 	}
+
 	EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
 	inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
 	FAIL_IF(!inst);
@@ -1136,20 +1158,12 @@ static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
 {
 	sljit_u8* inst;
 
-	if (dst == SLJIT_UNUSED) {
-		EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
-		inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
-		FAIL_IF(!inst);
-		*inst++ = GROUP_F7;
-		*inst |= NOT_rm;
-		inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, TMP_REG1, 0);
-		FAIL_IF(!inst);
-		*inst = OR_r_rm;
-		return SLJIT_SUCCESS;
-	}
+	if (dst == SLJIT_UNUSED)
+		dst = TMP_REG1;
+
 	if (FAST_IS_REG(dst)) {
 		EMIT_MOV(compiler, dst, 0, src, srcw);
-		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, dstw);
+		inst = emit_x86_instruction(compiler, 1, 0, 0, dst, 0);
 		FAIL_IF(!inst);
 		*inst++ = GROUP_F7;
 		*inst |= NOT_rm;
@@ -1158,6 +1172,7 @@ static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
 		*inst = OR_r_rm;
 		return SLJIT_SUCCESS;
 	}
+
 	EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
 	inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
 	FAIL_IF(!inst);
@@ -1170,6 +1185,10 @@ static sljit_s32 emit_not_with_flags(struct sljit_compiler *compiler,
 	return SLJIT_SUCCESS;
 }
 
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+static const sljit_sw emit_clz_arg = 32 + 31;
+#endif
+
 static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src, sljit_sw srcw)
@@ -1178,104 +1197,54 @@ static sljit_s32 emit_clz(struct sljit_compiler *compiler, sljit_s32 op_flags,
 	sljit_s32 dst_r;
 
 	SLJIT_UNUSED_ARG(op_flags);
 
-	if (SLJIT_UNLIKELY(dst == SLJIT_UNUSED)) {
-		/* Just set the zero flag. */
-		EMIT_MOV(compiler, TMP_REG1, 0, src, srcw);
-		inst = emit_x86_instruction(compiler, 1, 0, 0, TMP_REG1, 0);
-		FAIL_IF(!inst);
-		*inst++ = GROUP_F7;
-		*inst |= NOT_rm;
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, 31, TMP_REG1, 0);
-#else
-		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? 63 : 31, TMP_REG1, 0);
-#endif
-		FAIL_IF(!inst);
-		*inst |= SHR;
-		return SLJIT_SUCCESS;
-	}
 
 	if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
 		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw);
 		src = TMP_REG1;
 		srcw = 0;
 	}
 
+	if (cpu_has_cmov == -1)
+		get_cpu_features();
+
-	inst = emit_x86_instruction(compiler, 2, TMP_REG1, 0, src, srcw);
+	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+
+	inst = emit_x86_instruction(compiler, 2, dst_r, 0, src, srcw);
 	FAIL_IF(!inst);
 	*inst++ = GROUP_0F;
 	*inst = BSR_r_rm;
 
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	if (FAST_IS_REG(dst))
-		dst_r = dst;
-	else {
-		/* Find an unused temporary register. */
-		if ((dst & REG_MASK) != SLJIT_R0 && (dst & OFFS_REG_MASK) != TO_OFFS_REG(SLJIT_R0))
-			dst_r = SLJIT_R0;
-		else if ((dst & REG_MASK) != SLJIT_R1 && (dst & OFFS_REG_MASK) != TO_OFFS_REG(SLJIT_R1))
-			dst_r = SLJIT_R1;
+	if (cpu_has_cmov) {
+		if (dst_r != TMP_REG1) {
+			EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 32 + 31);
+			inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0);
+		}
 		else
-			dst_r = SLJIT_R2;
-		EMIT_MOV(compiler, dst, dstw, dst_r, 0);
-	}
-	EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, 32 + 31);
-#else
-	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG2;
-	compiler->mode32 = 0;
-	EMIT_MOV(compiler, dst_r, 0, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? 64 + 63 : 32 + 31);
-	compiler->mode32 = op_flags & SLJIT_I32_OP;
-#endif
+			inst = emit_x86_instruction(compiler, 2, dst_r, 0, SLJIT_MEM0(), (sljit_sw)&emit_clz_arg);
 
-	if (cpu_has_cmov == -1)
-		get_cpu_features();
-
-	if (cpu_has_cmov) {
-		inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG1, 0);
 		FAIL_IF(!inst);
 		*inst++ = GROUP_0F;
-		*inst = CMOVNE_r_rm;
-	} else {
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-		inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
-		FAIL_IF(!inst);
-		INC_SIZE(4);
+		*inst = CMOVE_r_rm;
+	}
+	else
+		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, 32 + 31));
 
-		*inst++ = JE_i8;
-		*inst++ = 2;
-		*inst++ = MOV_r_rm;
-		*inst++ = MOD_REG | (reg_map[dst_r] << 3) | reg_map[TMP_REG1];
+	inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
 #else
-		inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
-		FAIL_IF(!inst);
-		INC_SIZE(5);
+	if (cpu_has_cmov) {
+		EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31));
 
-		*inst++ = JE_i8;
-		*inst++ = 3;
-		*inst++ = REX_W | (reg_map[dst_r] >= 8 ? REX_R : 0) | (reg_map[TMP_REG1] >= 8 ? REX_B : 0);
-		*inst++ = MOV_r_rm;
-		*inst++ = MOD_REG | (reg_lmap[dst_r] << 3) | reg_lmap[TMP_REG1];
-#endif
+		inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
+		FAIL_IF(!inst);
+		*inst++ = GROUP_0F;
+		*inst = CMOVE_r_rm;
 	}
+	else
+		FAIL_IF(sljit_emit_cmov_generic(compiler, SLJIT_EQUAL, dst_r, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? (64 + 63) : (32 + 31)));
 
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, 31, dst_r, 0);
-#else
 	inst = emit_x86_instruction(compiler, 1 | EX86_BIN_INS, SLJIT_IMM, !(op_flags & SLJIT_I32_OP) ? 63 : 31, dst_r, 0);
 #endif
+
 	FAIL_IF(!inst);
 	*(inst + 1) |= XOR;
 
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	if (dst & SLJIT_MEM) {
-		inst = emit_x86_instruction(compiler, 1, dst_r, 0, dst, dstw);
-		FAIL_IF(!inst);
-		*inst = XCHG_r_rm;
-	}
-#else
 	if (dst & SLJIT_MEM)
-		EMIT_MOV(compiler, dst, dstw, TMP_REG2, 0);
-#endif
+		EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
+
 	return SLJIT_SUCCESS;
 }
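Editor's note: the rewritten emit_clz derives the count from BSR, which returns the index of the highest set bit and leaves the destination undefined (setting ZF) for a zero input. The CMOVE/emit_cmov_generic path substitutes 32 + 31 (or 64 + 63) for the zero case, and the final XOR with 31 (or 63) converts the bit index into the leading-zero count. A C model of the 32-bit case:

    #include <assert.h>
    #include <stdint.h>

    static unsigned clz32(uint32_t x)
    {
        unsigned r = 32 + 31;  /* the value CMOVE picks when x == 0 (ZF set) */
        if (x != 0) {
            r = 31;
            while (!(x & 0x80000000u)) { x <<= 1; r--; }  /* r = BSR(x) */
        }
        return r ^ 31;  /* XOR 31 turns a bit index into a leading-zero count */
    }

    int main(void) {
        assert(clz32(0) == 32);
        assert(clz32(1) == 31);
        assert(clz32(0x80000000u) == 0);
        return 0;
    }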
@@ -1283,14 +1252,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src, sljit_sw srcw)
 {
-	sljit_u8* inst;
-	sljit_s32 update = 0;
 	sljit_s32 op_flags = GET_ALL_FLAGS(op);
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 	sljit_s32 dst_is_ereg = 0;
-	sljit_s32 src_is_ereg = 0;
-#else
-#	define src_is_ereg 0
 #endif
 
 	CHECK_ERROR();
@@ -1299,40 +1263,42 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
 	ADJUST_LOCAL_OFFSET(src, srcw);
 
 	CHECK_EXTRA_REGS(dst, dstw, dst_is_ereg = 1);
-	CHECK_EXTRA_REGS(src, srcw, src_is_ereg = 1);
+	CHECK_EXTRA_REGS(src, srcw, (void)0);
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 	compiler->mode32 = op_flags & SLJIT_I32_OP;
 #endif
 
+	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) {
+		if (op <= SLJIT_MOV_P && (src & SLJIT_MEM))
+			return emit_prefetch(compiler, op, src, srcw);
+		return SLJIT_SUCCESS;
+	}
+
 	op = GET_OPCODE(op);
-	if (op >= SLJIT_MOV && op <= SLJIT_MOVU_P) {
+
+	if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 		compiler->mode32 = 0;
 #endif
 
+		if (FAST_IS_REG(src) && src == dst) {
+			if (!TYPE_CAST_NEEDED(op))
+				return SLJIT_SUCCESS;
+		}
+
 		if (op_flags & SLJIT_I32_OP) {
-			if (FAST_IS_REG(src) && src == dst) {
-				if (!TYPE_CAST_NEEDED(op))
-					return SLJIT_SUCCESS;
-			}
#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-			if (op == SLJIT_MOV_S32 && (src & SLJIT_MEM))
-				op = SLJIT_MOV_U32;
-			if (op == SLJIT_MOVU_S32 && (src & SLJIT_MEM))
-				op = SLJIT_MOVU_U32;
-			if (op == SLJIT_MOV_U32 && (src & SLJIT_IMM))
-				op = SLJIT_MOV_S32;
-			if (op == SLJIT_MOVU_U32 && (src & SLJIT_IMM))
-				op = SLJIT_MOVU_S32;
+			if (src & SLJIT_MEM) {
+				if (op == SLJIT_MOV_S32)
+					op = SLJIT_MOV_U32;
+			}
+			else if (src & SLJIT_IMM) {
+				if (op == SLJIT_MOV_U32)
+					op = SLJIT_MOV_S32;
+			}
 #endif
 		}
 
-		SLJIT_COMPILE_ASSERT(SLJIT_MOV + 8 == SLJIT_MOVU, movu_offset);
-		if (op >= SLJIT_MOVU) {
-			update = 1;
-			op -= 8;
-		}
-
 		if (src & SLJIT_IMM) {
 			switch (op) {
 			case SLJIT_MOV_U8:
@@ -1362,14 +1328,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
 #endif
 		}
 
-		if (SLJIT_UNLIKELY(update) && (src & SLJIT_MEM) && !src_is_ereg && (src & REG_MASK) && (srcw != 0 || (src & OFFS_REG_MASK) != 0)) {
-			inst = emit_x86_instruction(compiler, 1, src & REG_MASK, 0, src, srcw);
-			FAIL_IF(!inst);
-			*inst = LEA_r_m;
-			src &= SLJIT_MEM | 0xf;
-			srcw = 0;
-		}
-
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 		if (SLJIT_UNLIKELY(dst_is_ereg) && (!(op == SLJIT_MOV || op == SLJIT_MOV_U32 || op == SLJIT_MOV_S32 || op == SLJIT_MOV_P) || (src & SLJIT_MEM))) {
 			SLJIT_ASSERT(dst == SLJIT_MEM1(SLJIT_SP));
@@ -1412,40 +1370,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
 		if (SLJIT_UNLIKELY(dst_is_ereg) && dst == TMP_REG1)
 			return emit_mov(compiler, SLJIT_MEM1(SLJIT_SP), dstw, TMP_REG1, 0);
 #endif
-
-		if (SLJIT_UNLIKELY(update) && (dst & SLJIT_MEM) && (dst & REG_MASK) && (dstw != 0 || (dst & OFFS_REG_MASK) != 0)) {
-			inst = emit_x86_instruction(compiler, 1, dst & REG_MASK, 0, dst, dstw);
-			FAIL_IF(!inst);
-			*inst = LEA_r_m;
-		}
 		return SLJIT_SUCCESS;
 	}
 
-	if (SLJIT_UNLIKELY(GET_FLAGS(op_flags)))
-		compiler->flags_saved = 0;
-
 	switch (op) {
 	case SLJIT_NOT:
-		if (SLJIT_UNLIKELY(op_flags & SLJIT_SET_E))
+		if (SLJIT_UNLIKELY(op_flags & SLJIT_SET_Z))
 			return emit_not_with_flags(compiler, dst, dstw, src, srcw);
 		return emit_unary(compiler, NOT_rm, dst, dstw, src, srcw);
 
 	case SLJIT_NEG:
-		if (SLJIT_UNLIKELY(op_flags & SLJIT_KEEP_FLAGS) && !compiler->flags_saved)
-			FAIL_IF(emit_save_flags(compiler));
 		return emit_unary(compiler, NEG_rm, dst, dstw, src, srcw);
 
 	case SLJIT_CLZ:
-		if (SLJIT_UNLIKELY(op_flags & SLJIT_KEEP_FLAGS) && !compiler->flags_saved)
-			FAIL_IF(emit_save_flags(compiler));
 		return emit_clz(compiler, op_flags, dst, dstw, src, srcw);
 	}
 
 	return SLJIT_SUCCESS;
-
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-#	undef src_is_ereg
-#endif
 }
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
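Editor's note: two details of the rewritten sljit_emit_op1 are worth spelling out. A flag-less operation into SLJIT_UNUSED is now either turned into a prefetch hint (a MOV family op with a memory source) or dropped entirely, and 32-bit moves are canonicalized on x86-64 because a plain 32-bit MOV already zero-extends while a 32-bit immediate store is sign-extended. A compilable illustration of those two architectural facts:

    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint64_t r = 0xffffffffffffffffull;
        r = (uint32_t)0x80000000u;  /* 32-bit mov zero-extends: high half cleared */
        assert(r == 0x0000000080000000ull);

        int64_t s = (int32_t)-1;    /* mov r/m64, imm32 sign-extends */
        assert(s == -1);
        return 0;
    }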
@@ -1457,8 +1398,8 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
 		*(inst + 1) |= (op_imm); \
 	} \
 	else { \
-		FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immw)); \
-		inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, arg, argw); \
+		FAIL_IF(emit_load_imm64(compiler, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, immw)); \
+		inst = emit_x86_instruction(compiler, 1, (arg == TMP_REG1) ? TMP_REG2 : TMP_REG1, 0, arg, argw); \
 		FAIL_IF(!inst); \
 		*inst = (op_mr); \
 	}
@@ -1479,12 +1420,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compiler, sljit_s32 op,
 #endif
 
 static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
-	sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+	sljit_u32 op_types,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w)
 {
 	sljit_u8* inst;
+	sljit_u8 op_eax_imm = (op_types >> 24);
+	sljit_u8 op_rm = (op_types >> 16) & 0xff;
+	sljit_u8 op_mr = (op_types >> 8) & 0xff;
+	sljit_u8 op_imm = op_types & 0xff;
 
 	if (dst == SLJIT_UNUSED) {
 		EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
@@ -1595,12 +1540,16 @@ static sljit_s32 emit_cum_binary(struct sljit_compiler *compiler,
 }
 
 static sljit_s32 emit_non_cum_binary(struct sljit_compiler *compiler,
-	sljit_u8 op_rm, sljit_u8 op_mr, sljit_u8 op_imm, sljit_u8 op_eax_imm,
+	sljit_u32 op_types,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w)
 {
 	sljit_u8* inst;
+	sljit_u8 op_eax_imm = (op_types >> 24);
+	sljit_u8 op_rm = (op_types >> 16) & 0xff;
+	sljit_u8 op_mr = (op_types >> 8) & 0xff;
+	sljit_u8 op_imm = op_types & 0xff;
 
 	if (dst == SLJIT_UNUSED) {
 		EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
@@ -1684,7 +1633,7 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
 	sljit_u8* inst;
 	sljit_s32 dst_r;
 
-	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
+	dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
 
 	/* Register destination. */
 	if (dst_r == src1 && !(src2 & SLJIT_IMM)) {
@@ -1736,9 +1685,9 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
 			sljit_unaligned_store_s32(inst, (sljit_s32)src1w);
 		}
 		else {
-			EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, src1w);
 			if (dst_r != src2)
 				EMIT_MOV(compiler, dst_r, 0, src2, src2w);
+			FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src1w));
 			inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
 			FAIL_IF(!inst);
 			*inst++ = GROUP_0F;
@@ -1779,9 +1728,9 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
 			sljit_unaligned_store_s32(inst, (sljit_s32)src2w);
 		}
 		else {
-			EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_IMM, src2w);
 			if (dst_r != src1)
 				EMIT_MOV(compiler, dst_r, 0, src1, src1w);
+			FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w));
 			inst = emit_x86_instruction(compiler, 2, dst_r, 0, TMP_REG2, 0);
 			FAIL_IF(!inst);
 			*inst++ = GROUP_0F;
@@ -1800,13 +1749,13 @@ static sljit_s32 emit_mul(struct sljit_compiler *compiler,
 		*inst = IMUL_r_rm;
 	}
 
-	if (dst_r == TMP_REG1)
+	if (dst & SLJIT_MEM)
 		EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
 
 	return SLJIT_SUCCESS;
 }
 
-static sljit_s32 emit_lea_binary(struct sljit_compiler *compiler, sljit_s32 keep_flags,
+static sljit_s32 emit_lea_binary(struct sljit_compiler *compiler,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w)
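Editor's note: emit_mul (partly shown above) picks among three IMUL forms by immediate width — IMUL r, r/m, imm8 for byte-range constants, IMUL r, r/m, imm32 for halfword constants, and otherwise (64-bit only) loading the constant into TMP_REG2 first, which is why the emit_load_imm64 call moved after the destination move. A sketch of the selection; the helper and enum names are mine:

    #include <assert.h>
    #include <stdint.h>

    typedef enum { IMUL_IMM8, IMUL_IMM32, IMUL_VIA_REG } imul_form;

    static imul_form pick_imul_form(int64_t imm)
    {
        if (imm >= -128 && imm <= 127)
            return IMUL_IMM8;              /* IMUL_r_rm_i8 (6B /r ib) */
        if (imm == (int64_t)(int32_t)imm)
            return IMUL_IMM32;             /* IMUL_r_rm_i32 (69 /r id) */
        return IMUL_VIA_REG;               /* load into a temp, then IMUL_r_rm */
    }

    int main(void) {
        assert(pick_imul_form(100) == IMUL_IMM8);
        assert(pick_imul_form(100000) == IMUL_IMM32);
        assert(pick_imul_form((int64_t)1 << 40) == IMUL_VIA_REG);
        return 0;
    }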
@@ -1815,12 +1764,10 @@ static sljit_s32 emit_lea_binary(struct sljit_compiler *compiler,
 	sljit_u8* inst;
 	sljit_s32 dst_r, done = 0;
 
 	/* These cases are better left to the normal code path. */
-	if (!keep_flags) {
-		if (dst == src1 && dstw == src1w)
-			return SLJIT_ERR_UNSUPPORTED;
-		if (dst == src2 && dstw == src2w)
-			return SLJIT_ERR_UNSUPPORTED;
-	}
+	if (dst == src1 && dstw == src1w)
+		return SLJIT_ERR_UNSUPPORTED;
+	if (dst == src2 && dstw == src2w)
+		return SLJIT_ERR_UNSUPPORTED;
 
 	dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
 
@@ -1932,7 +1879,7 @@ static sljit_s32 emit_test_binary(struct sljit_compiler *compiler,
 	}
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-	if (src2 == SLJIT_R0 && (src2 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
+	if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128) && (compiler->mode32 || IS_HALFWORD(src1w))) {
 #else
 	if (src2 == SLJIT_R0 && (src1 & SLJIT_IMM) && (src1w > 127 || src1w < -128)) {
 #endif
@@ -1949,8 +1896,8 @@ static sljit_s32 emit_test_binary(struct sljit_compiler *compiler,
 			*inst = GROUP_F7;
 		}
 		else {
-			FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src2w));
-			inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, src1, src1w);
+			FAIL_IF(emit_load_imm64(compiler, TMP_REG1, src2w));
+			inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src1, src1w);
 			FAIL_IF(!inst);
 			*inst = TEST_rm_r;
 		}
@@ -1978,8 +1925,8 @@ static sljit_s32 emit_test_binary(struct sljit_compiler *compiler,
 			*inst = GROUP_F7;
 		}
 		else {
-			FAIL_IF(emit_load_imm64(compiler, TMP_REG2, src1w));
-			inst = emit_x86_instruction(compiler, 1, TMP_REG2, 0, src2, src2w);
+			FAIL_IF(emit_load_imm64(compiler, TMP_REG1, src1w));
+			inst = emit_x86_instruction(compiler, 1, TMP_REG1, 0, src2, src2w);
 			FAIL_IF(!inst);
 			*inst = TEST_rm_r;
 		}
@@ -2080,7 +2027,7 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
 		*inst |= mode;
 		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
 	}
-	else if (FAST_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
+	else if (SLOW_IS_REG(dst) && dst != src2 && !ADDRESSING_DEPENDS_ON(src2, dst)) {
 		if (src1 != dst)
 			EMIT_MOV(compiler, dst, 0, src1, src1w);
 		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_PREF_SHIFT_REG, 0);
@@ -2091,25 +2038,26 @@ static sljit_s32 emit_shift(struct sljit_compiler *compiler,
 		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
 	}
 	else {
-		/* This case is really difficult, since ecx itself may used for
-		   addressing, and we must ensure to work even in that case. */
+		/* This case is complex since ecx itself may be used for
+		   addressing, and this case must be supported as well. */
 		EMIT_MOV(compiler, TMP_REG1, 0, src1, src1w);
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-		EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+		EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), 0, SLJIT_PREF_SHIFT_REG, 0);
+		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
+		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
+		FAIL_IF(!inst);
+		*inst |= mode;
+		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), 0);
 #else
-		/* [esp+0] contains the flags. */
-		EMIT_MOV(compiler, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_sw), SLJIT_PREF_SHIFT_REG, 0);
-#endif
+		EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_PREF_SHIFT_REG, 0);
 		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, src2, src2w);
 		inst = emit_x86_instruction(compiler, 1 | EX86_SHIFT_INS, SLJIT_PREF_SHIFT_REG, 0, TMP_REG1, 0);
 		FAIL_IF(!inst);
 		*inst |= mode;
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, TMP_REG2, 0);
-#else
-		EMIT_MOV(compiler, SLJIT_PREF_SHIFT_REG, 0, SLJIT_MEM1(SLJIT_SP), sizeof(sljit_sw));
 #endif
-		EMIT_MOV(compiler, dst, dstw, TMP_REG1, 0);
+
+		if (dst != SLJIT_UNUSED)
+			return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
 	}
 
 	return SLJIT_SUCCESS;
@@ -2133,7 +2081,7 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
 	if (!set_flags)
 		return emit_mov(compiler, dst, dstw, src1, src1w);
 
 	/* OR dst, src, 0 */
-	return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32,
+	return emit_cum_binary(compiler, BINARY_OPCODE(OR),
 		dst, dstw, src1, src1w, SLJIT_IMM, 0);
 }
 
@@ -2143,10 +2091,10 @@ static sljit_s32 emit_shift_with_flags(struct sljit_compiler *compiler,
 	if (!FAST_IS_REG(dst))
 		FAIL_IF(emit_cmp_binary(compiler, src1, src1w, SLJIT_IMM, 0));
 
-	FAIL_IF(emit_shift(compiler,mode, dst, dstw, src1, src1w, src2, src2w));
+	FAIL_IF(emit_shift(compiler, mode, dst, dstw, src1, src1w, src2, src2w));
 
 	if (FAST_IS_REG(dst))
-		return emit_cmp_binary(compiler, dst, dstw, SLJIT_IMM, 0);
+		return emit_cmp_binary(compiler, (dst == SLJIT_UNUSED) ? TMP_REG1 : dst, dstw, SLJIT_IMM, 0);
 	return SLJIT_SUCCESS;
 }
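Editor's note: the reworked else branch of emit_shift handles a live ecx. Variable shift counts must sit in CL, so the old ecx value is parked in [esp+0] on x86-32 (now free, since flags are no longer saved there) or in TMP_REG2 on x86-64, then restored afterwards. The dance, modeled in C:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t shl_preserving_ecx(uint32_t value, uint32_t count, uint32_t *ecx)
    {
        uint32_t saved = *ecx;  /* x86-32: spill to [esp+0]; x86-64: TMP_REG2 */
        *ecx = count & 31;      /* the count must live in CL */
        value <<= *ecx;         /* shl value, cl */
        *ecx = saved;           /* restore the caller's ecx */
        return value;
    }

    int main(void) {
        uint32_t ecx = 7;
        assert(shl_preserving_ecx(1, 4, &ecx) == 16 && ecx == 7);
        return 0;
    }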
@@ -2168,77 +2116,54 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compiler, sljit_s32 op,
 	compiler->mode32 = op & SLJIT_I32_OP;
 #endif
 
-	if (GET_OPCODE(op) >= SLJIT_MUL) {
-		if (SLJIT_UNLIKELY(GET_FLAGS(op)))
-			compiler->flags_saved = 0;
-		else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved)
-			FAIL_IF(emit_save_flags(compiler));
-	}
+	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op))
+		return SLJIT_SUCCESS;
 
 	switch (GET_OPCODE(op)) {
 	case SLJIT_ADD:
-		if (!GET_FLAGS(op)) {
-			if (emit_lea_binary(compiler, op & SLJIT_KEEP_FLAGS, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED)
+		if (!HAS_FLAGS(op)) {
+			if (emit_lea_binary(compiler, dst, dstw, src1, src1w, src2, src2w) != SLJIT_ERR_UNSUPPORTED)
 				return compiler->error;
 		}
-		else
-			compiler->flags_saved = 0;
-		if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved)
-			FAIL_IF(emit_save_flags(compiler));
-		return emit_cum_binary(compiler, ADD_r_rm, ADD_rm_r, ADD, ADD_EAX_i32,
+		return emit_cum_binary(compiler, BINARY_OPCODE(ADD),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_ADDC:
-		if (SLJIT_UNLIKELY(compiler->flags_saved)) /* C flag must be restored. */
-			FAIL_IF(emit_restore_flags(compiler, 1));
-		else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS))
-			FAIL_IF(emit_save_flags(compiler));
-		if (SLJIT_UNLIKELY(GET_FLAGS(op)))
-			compiler->flags_saved = 0;
-		return emit_cum_binary(compiler, ADC_r_rm, ADC_rm_r, ADC, ADC_EAX_i32,
+		return emit_cum_binary(compiler, BINARY_OPCODE(ADC),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_SUB:
-		if (!GET_FLAGS(op)) {
-			if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, op & SLJIT_KEEP_FLAGS, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED)
+		if (!HAS_FLAGS(op)) {
+			if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED)
 				return compiler->error;
 		}
-		else
-			compiler->flags_saved = 0;
-		if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS) && !compiler->flags_saved)
-			FAIL_IF(emit_save_flags(compiler));
+
 		if (dst == SLJIT_UNUSED)
 			return emit_cmp_binary(compiler, src1, src1w, src2, src2w);
-		return emit_non_cum_binary(compiler, SUB_r_rm, SUB_rm_r, SUB, SUB_EAX_i32,
+		return emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_SUBC:
-		if (SLJIT_UNLIKELY(compiler->flags_saved)) /* C flag must be restored. */
-			FAIL_IF(emit_restore_flags(compiler, 1));
-		else if (SLJIT_UNLIKELY(op & SLJIT_KEEP_FLAGS))
-			FAIL_IF(emit_save_flags(compiler));
-		if (SLJIT_UNLIKELY(GET_FLAGS(op)))
-			compiler->flags_saved = 0;
-		return emit_non_cum_binary(compiler, SBB_r_rm, SBB_rm_r, SBB, SBB_EAX_i32,
+		return emit_non_cum_binary(compiler, BINARY_OPCODE(SBB),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_MUL:
 		return emit_mul(compiler, dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_AND:
 		if (dst == SLJIT_UNUSED)
 			return emit_test_binary(compiler, src1, src1w, src2, src2w);
-		return emit_cum_binary(compiler, AND_r_rm, AND_rm_r, AND, AND_EAX_i32,
+		return emit_cum_binary(compiler, BINARY_OPCODE(AND),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_OR:
-		return emit_cum_binary(compiler, OR_r_rm, OR_rm_r, OR, OR_EAX_i32,
+		return emit_cum_binary(compiler, BINARY_OPCODE(OR),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_XOR:
-		return emit_cum_binary(compiler, XOR_r_rm, XOR_rm_r, XOR, XOR_EAX_i32,
+		return emit_cum_binary(compiler, BINARY_OPCODE(XOR),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_SHL:
-		return emit_shift_with_flags(compiler, SHL, GET_FLAGS(op),
+		return emit_shift_with_flags(compiler, SHL, HAS_FLAGS(op),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_LSHR:
-		return emit_shift_with_flags(compiler, SHR, GET_FLAGS(op),
+		return emit_shift_with_flags(compiler, SHR, HAS_FLAGS(op),
 			dst, dstw, src1, src1w, src2, src2w);
 	case SLJIT_ASHR:
-		return emit_shift_with_flags(compiler, SAR, GET_FLAGS(op),
+		return emit_shift_with_flags(compiler, SAR, HAS_FLAGS(op),
 			dst, dstw, src1, src1w, src2, src2w);
 	}
 
@@ -2249,7 +2174,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
 {
 	CHECK_REG_INDEX(check_sljit_get_register_index(reg));
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-	if (reg >= SLJIT_R3 && reg <= SLJIT_R6)
+	if (reg >= SLJIT_R3 && reg <= SLJIT_R8)
 		return -1;
 #endif
 	return reg_map[reg];
@@ -2258,7 +2183,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_float_register_index(sljit_s32 reg)
 {
 	CHECK_REG_INDEX(check_sljit_get_float_register_index(reg));
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 	return reg;
+#else
+	return freg_map[reg];
+#endif
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
@@ -2280,36 +2209,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_custom(struct sljit_compiler *compiler,
 /* Floating point operators */
 /* --------------------------------------------------------------------- */
 
-/* Alignment + 2 * 16 bytes. */
-static sljit_s32 sse2_data[3 + (4 + 4) * 2];
+/* Alignment(3) + 4 * 16 bytes. */
+static sljit_s32 sse2_data[3 + (4 * 4)];
 static sljit_s32 *sse2_buffer;
 
 static void init_compiler(void)
 {
+	/* Align to 16 bytes. */
 	sse2_buffer = (sljit_s32*)(((sljit_uw)sse2_data + 15) & ~0xf);
-	/* Single precision constants. */
+
+	/* Single precision constants (each constant is 16 byte long). */
 	sse2_buffer[0] = 0x80000000;
 	sse2_buffer[4] = 0x7fffffff;
-	/* Double precision constants. */
+
+	/* Double precision constants (each constant is 16 byte long). */
 	sse2_buffer[8] = 0;
 	sse2_buffer[9] = 0x80000000;
 	sse2_buffer[12] = 0xffffffff;
 	sse2_buffer[13] = 0x7fffffff;
 }
 
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_is_fpu_available(void)
-{
-#ifdef SLJIT_IS_FPU_AVAILABLE
-	return SLJIT_IS_FPU_AVAILABLE;
-#elif (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
-	if (cpu_has_sse2 == -1)
-		get_cpu_features();
-	return cpu_has_sse2;
-#else /* SLJIT_DETECT_SSE2 */
-	return 1;
-#endif /* SLJIT_DETECT_SSE2 */
-}
-
 static sljit_s32 emit_sse2(struct sljit_compiler *compiler, sljit_u8 opcode,
 	sljit_s32 single, sljit_s32 xmm1, sljit_s32 xmm2, sljit_sw xmm2w)
 {
@@ -2350,7 +2268,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
 	sljit_s32 dst, sljit_sw dstw,
 	sljit_s32 src, sljit_sw srcw)
 {
-	sljit_s32 dst_r = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+	sljit_s32 dst_r = FAST_IS_REG(dst) ? dst : TMP_REG1;
 	sljit_u8 *inst;
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
@@ -2363,7 +2281,7 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_conv_sw_from_f64(struct sljit_compiler *compiler, sljit_s32 op,
 	*inst++ = GROUP_0F;
 	*inst = CVTTSD2SI_r_xm;
 
-	if (dst_r == TMP_REG1 && dst != SLJIT_UNUSED)
+	if (dst & SLJIT_MEM)
 		return emit_mov(compiler, dst, dstw, TMP_REG1, 0);
 	return SLJIT_SUCCESS;
 }
@@ -2407,11 +2325,11 @@ static SLJIT_INLINE sljit_s32 sljit_emit_fop1_cmp(struct sljit_compiler *compiler, sljit_s32 op,
 	sljit_s32 src1, sljit_sw src1w,
 	sljit_s32 src2, sljit_sw src2w)
 {
-	compiler->flags_saved = 0;
 	if (!FAST_IS_REG(src1)) {
 		FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, TMP_FREG, src1, src1w));
 		src1 = TMP_FREG;
 	}
+
 	return emit_sse2_logic(compiler, UCOMISD_x_xm, !(op & SLJIT_F32_OP), src1, src2, src2w);
 }
 
@@ -2456,7 +2374,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fop1(struct sljit_compiler *compiler, sljit_s32 op,
 		return SLJIT_SUCCESS;
 	}
 
-	if (SLOW_IS_REG(dst)) {
+	if (FAST_IS_REG(dst)) {
 		dst_r = dst;
 		if (dst != src)
 			FAIL_IF(emit_sse2_load(compiler, op & SLJIT_F32_OP, dst_r, src, srcw));
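Editor's note: the reshaped sse2_data gives each constant a full 16-byte, 16-byte-aligned slot so it can serve directly as an aligned memory operand — 0x80000000 / 0x7fffffff are the single-precision sign and absolute-value masks (applied via XORPS/ANDPS for negate and fabs), followed by the double-precision equivalents. The bit trick itself, verified in C:

    #include <assert.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
        float f = 1.5f;
        uint32_t bits;
        memcpy(&bits, &f, 4);
        bits ^= 0x80000000u;   /* xorps with the sign-mask slot: negate */
        memcpy(&f, &bits, 4);
        assert(f == -1.5f);
        bits &= 0x7fffffffu;   /* andps with the abs-mask slot: fabs */
        memcpy(&f, &bits, 4);
        assert(f == 1.5f);
        return 0;
    }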
@@ -2554,11 +2472,6 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_label* sljit_emit_label(struct sljit_compiler *compiler)
 	CHECK_ERROR_PTR();
 	CHECK_PTR(check_sljit_emit_label(compiler));
 
-	/* We should restore the flags before the label,
-	   since other taken jumps has their own flags as well. */
-	if (SLJIT_UNLIKELY(compiler->flags_saved))
-		PTR_FAIL_IF(emit_restore_flags(compiler, 0));
-
 	if (compiler->last_label && compiler->last_label->size == compiler->size)
 		return compiler->last_label;
 
@@ -2583,20 +2496,11 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_jump(struct sljit_compiler *compiler, sljit_s32 type)
 	CHECK_ERROR_PTR();
 	CHECK_PTR(check_sljit_emit_jump(compiler, type));
 
-	if (SLJIT_UNLIKELY(compiler->flags_saved)) {
-		if ((type & 0xff) <= SLJIT_JUMP)
-			PTR_FAIL_IF(emit_restore_flags(compiler, 0));
-		compiler->flags_saved = 0;
-	}
-
 	jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
 	PTR_FAIL_IF_NULL(jump);
 	set_jump(jump, compiler, type & SLJIT_REWRITABLE_JUMP);
 	type &= 0xff;
 
-	if (type >= SLJIT_CALL1)
-		PTR_FAIL_IF(call_with_args(compiler, type));
-
 	/* Worst case size. */
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 	compiler->size += (type >= SLJIT_JUMP) ? 5 : 6;
@@ -2623,32 +2527,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_ijump(struct sljit_compiler *compiler, sljit_s32 type, sljit_s32 src, sljit_sw srcw)
 	CHECK_EXTRA_REGS(src, srcw, (void)0);
 
-	if (SLJIT_UNLIKELY(compiler->flags_saved)) {
-		if (type <= SLJIT_JUMP)
-			FAIL_IF(emit_restore_flags(compiler, 0));
-		compiler->flags_saved = 0;
-	}
-
-	if (type >= SLJIT_CALL1) {
-#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
-#if (defined SLJIT_X86_32_FASTCALL && SLJIT_X86_32_FASTCALL)
-		if (src == SLJIT_R2) {
-			EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
-			src = TMP_REG1;
-		}
-		if (src == SLJIT_MEM1(SLJIT_SP) && type >= SLJIT_CALL3)
-			srcw += sizeof(sljit_sw);
-#endif
-#endif
-#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) && defined(_WIN64)
-		if (src == SLJIT_R2) {
-			EMIT_MOV(compiler, TMP_REG1, 0, src, 0);
-			src = TMP_REG1;
-		}
-#endif
-		FAIL_IF(call_with_args(compiler, type));
-	}
-
 	if (src == SLJIT_IMM) {
 		jump = (struct sljit_jump*)ensure_abuf(compiler, sizeof(struct sljit_jump));
 		FAIL_IF_NULL(jump);
@@ -2683,37 +2561,29 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
 	sljit_s32 dst, sljit_sw dstw,
-	sljit_s32 src, sljit_sw srcw,
 	sljit_s32 type)
 {
 	sljit_u8 *inst;
 	sljit_u8 cond_set = 0;
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 	sljit_s32 reg;
-#else
-	/* CHECK_EXTRA_REGS migh overwrite these values. */
+#endif
+	/* ADJUST_LOCAL_OFFSET and CHECK_EXTRA_REGS might overwrite these values. */
 	sljit_s32 dst_save = dst;
 	sljit_sw dstw_save = dstw;
-#endif
 
 	CHECK_ERROR();
-	CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, src, srcw, type));
-	SLJIT_UNUSED_ARG(srcw);
-
-	if (dst == SLJIT_UNUSED)
-		return SLJIT_SUCCESS;
+	CHECK(check_sljit_emit_op_flags(compiler, op, dst, dstw, type));
 
 	ADJUST_LOCAL_OFFSET(dst, dstw);
 	CHECK_EXTRA_REGS(dst, dstw, (void)0);
-	if (SLJIT_UNLIKELY(compiler->flags_saved))
-		FAIL_IF(emit_restore_flags(compiler, op & SLJIT_KEEP_FLAGS));
 
 	type &= 0xff;
 	/* setcc = jcc + 0x10. */
 	cond_set = get_jump_code(type) + 0x10;
 
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-	if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst) && dst == src) {
+	if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst)) {
 		inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 + 3);
 		FAIL_IF(!inst);
 		INC_SIZE(4 + 3);
@@ -2728,7 +2598,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
 		return SLJIT_SUCCESS;
 	}
 
-	reg = (op == SLJIT_MOV && FAST_IS_REG(dst)) ? dst : TMP_REG1;
+	reg = (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG1;
dst : TMP_REG1; + reg = (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) ? dst : TMP_REG1; inst = (sljit_u8*)ensure_buf(compiler, 1 + 4 + 4); FAIL_IF(!inst); @@ -2739,6 +2609,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co *inst++ = cond_set; *inst++ = MOD_REG | reg_lmap[reg]; *inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : (REX_B | REX_R)); + /* The movzx instruction does not affect flags. */ *inst++ = GROUP_0F; *inst++ = MOVZX_r_rm8; *inst = MOD_REG | (reg_lmap[reg] << 3) | reg_lmap[reg]; @@ -2750,12 +2621,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co compiler->mode32 = GET_OPCODE(op) != SLJIT_MOV; return emit_mov(compiler, dst, dstw, TMP_REG1, 0); } + #if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \ || (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS) compiler->skip_checks = 1; #endif - return sljit_emit_op2(compiler, op, dst, dstw, dst, dstw, TMP_REG1, 0); -#else /* SLJIT_CONFIG_X86_64 */ + return sljit_emit_op2(compiler, op, dst_save, dstw_save, dst_save, dstw_save, TMP_REG1, 0); + +#else + /* The SLJIT_CONFIG_X86_32 code path starts here. */ if (GET_OPCODE(op) < SLJIT_ADD && FAST_IS_REG(dst)) { if (reg_map[dst] <= 4) { /* Low byte is accessible. */ @@ -2809,8 +2683,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co return SLJIT_SUCCESS; } - if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst) && dst == src && reg_map[dst] <= 4) { - SLJIT_COMPILE_ASSERT(reg_map[SLJIT_R0] == 0, scratch_reg1_must_be_eax); + if (GET_OPCODE(op) == SLJIT_OR && !GET_ALL_FLAGS(op) && FAST_IS_REG(dst) && reg_map[dst] <= 4) { + SLJIT_ASSERT(reg_map[SLJIT_R0] == 0); + if (dst != SLJIT_R0) { inst = (sljit_u8*)ensure_buf(compiler, 1 + 1 + 3 + 2 + 1); FAIL_IF(!inst); @@ -2869,6 +2744,46 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *co #endif /* SLJIT_CONFIG_X86_64 */ } +SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type, + sljit_s32 dst_reg, + sljit_s32 src, sljit_sw srcw) +{ + sljit_u8* inst; + + CHECK_ERROR(); + CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw)); + +#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32) + dst_reg &= ~SLJIT_I32_OP; + + if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3)) + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw); +#else + if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV)) + return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw); +#endif + + /* ADJUST_LOCAL_OFFSET is not needed. 
@@ -2869,6 +2744,46 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_flags(struct sljit_compiler *compiler, sljit_s32 op,
 #endif /* SLJIT_CONFIG_X86_64 */
 }
 
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_cmov(struct sljit_compiler *compiler, sljit_s32 type,
+	sljit_s32 dst_reg,
+	sljit_s32 src, sljit_sw srcw)
+{
+	sljit_u8* inst;
+
+	CHECK_ERROR();
+	CHECK(check_sljit_emit_cmov(compiler, type, dst_reg, src, srcw));
+
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+	dst_reg &= ~SLJIT_I32_OP;
+
+	if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV) || (dst_reg >= SLJIT_R3 && dst_reg <= SLJIT_S3))
+		return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
+#else
+	if (!sljit_has_cpu_feature(SLJIT_HAS_CMOV))
+		return sljit_emit_cmov_generic(compiler, type, dst_reg, src, srcw);
+#endif
+
+	/* ADJUST_LOCAL_OFFSET is not needed. */
+	CHECK_EXTRA_REGS(src, srcw, (void)0);
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	compiler->mode32 = dst_reg & SLJIT_I32_OP;
+	dst_reg &= ~SLJIT_I32_OP;
+#endif
+
+	if (SLJIT_UNLIKELY(src & SLJIT_IMM)) {
+		EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw);
+		src = TMP_REG1;
+		srcw = 0;
+	}
+
+	inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw);
+	FAIL_IF(!inst);
+	*inst++ = GROUP_0F;
+	*inst = get_jump_code(type & 0xff) - 0x40;
+	return SLJIT_SUCCESS;
+}
+
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
 {
 	CHECK_ERROR();
@@ -2887,16 +2802,16 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_local_base(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw offset)
 	if (NOT_HALFWORD(offset)) {
 		FAIL_IF(emit_load_imm64(compiler, TMP_REG1, offset));
 #if (defined SLJIT_DEBUG && SLJIT_DEBUG)
-		SLJIT_ASSERT(emit_lea_binary(compiler, SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0) != SLJIT_ERR_UNSUPPORTED);
+		SLJIT_ASSERT(emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0) != SLJIT_ERR_UNSUPPORTED);
 		return compiler->error;
 #else
-		return emit_lea_binary(compiler, SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0);
+		return emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, TMP_REG1, 0);
 #endif
 	}
 #endif
 
 	if (offset != 0)
-		return emit_lea_binary(compiler, SLJIT_KEEP_FLAGS, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset);
+		return emit_lea_binary(compiler, dst, dstw, SLJIT_SP, 0, SLJIT_IMM, offset);
 	return emit_mov(compiler, dst, dstw, SLJIT_SP, 0);
 }
 
@@ -2920,14 +2835,11 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_const* sljit_emit_const(struct sljit_compiler *compiler, sljit_s32 dst, sljit_sw dstw, sljit_sw init_value)
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
 	compiler->mode32 = 0;
-	reg = SLOW_IS_REG(dst) ? dst : TMP_REG1;
+	reg = FAST_IS_REG(dst) ? dst : TMP_REG1;
 
 	if (emit_load_imm64(compiler, reg, init_value))
 		return NULL;
 #else
-	if (dst == SLJIT_UNUSED)
-		dst = TMP_REG1;
-
 	if (emit_mov(compiler, dst, dstw, SLJIT_IMM, init_value))
 		return NULL;
 #endif
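Editor's note: the new sljit_emit_cmov ends with get_jump_code(type) - 0x40, the same opcode arithmetic the op_flags code uses in the other direction ("setcc = jcc + 0x10"). In the 0F escape space, Jcc rel32 is 0x80+cc, SETcc is 0x90+cc, and CMOVcc is 0x40+cc, which a few asserts confirm:

    #include <assert.h>

    int main(void) {
        int jcc_jz = 0x84;              /* jz rel32 is 0F 84 */
        assert(jcc_jz + 0x10 == 0x94);  /* setz is 0F 94 */
        assert(jcc_jz - 0x40 == 0x44);  /* cmovz is 0F 44, i.e. CMOVE_r_rm above */
        return 0;
    }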
"" : ".i", - jump_names[type & 0xff], JUMP_POSTFIX(type)); - sljit_verbose_reg(compiler, dst_reg & ~SLJIT_I32_OP); - fprintf(compiler->verbose, ", "); - sljit_verbose_param(compiler, src, srcw); - fprintf(compiler->verbose, "\n"); - } -#endif - - ADJUST_LOCAL_OFFSET(src, srcw); - CHECK_EXTRA_REGS(src, srcw, (void)0); - -#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64) - compiler->mode32 = dst_reg & SLJIT_I32_OP; -#endif - dst_reg &= ~SLJIT_I32_OP; - - if (SLJIT_UNLIKELY(src & SLJIT_IMM)) { - EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, srcw); - src = TMP_REG1; - srcw = 0; - } - - inst = emit_x86_instruction(compiler, 2, dst_reg, 0, src, srcw); - FAIL_IF(!inst); - *inst++ = GROUP_0F; - *inst = get_jump_code(type & 0xff) - 0x40; - return SLJIT_SUCCESS; -} |