Diffstat (limited to 'thirdparty/pcre2/src/sljit/sljitNativeX86_64.c')
-rw-r--r--  thirdparty/pcre2/src/sljit/sljitNativeX86_64.c | 838
1 file changed, 452 insertions(+), 386 deletions(-)
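In summary, this sljit update moves the Operators section (emit_do_imm32, emit_x86_instruction) ahead of the enter/return code, rewrites sljit_emit_enter() for the typed SLJIT_ARG_SHIFT argument interface, replaces the hard-wired xmm6 save/restore on _WIN64 with loops over all saved float registers, factors the epilogue into emit_stack_frame_release() and adds sljit_emit_return_void(), adds tail-call support (SLJIT_CALL_RETURN) to sljit_emit_call()/sljit_emit_icall(), and adds U8() casts throughout to silence integer-narrowing warnings.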
diff --git a/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c b/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c
index e85b56a61a..f37df6e1bf 100644
--- a/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c
+++ b/thirdparty/pcre2/src/sljit/sljitNativeX86_64.c
@@ -26,6 +26,10 @@
/* x86 64-bit arch dependent functions. */
+/* --------------------------------------------------------------------- */
+/* Operators */
+/* --------------------------------------------------------------------- */
+
static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg, sljit_sw imm)
{
sljit_u8 *inst;
@@ -34,14 +38,246 @@ static sljit_s32 emit_load_imm64(struct sljit_compiler *compiler, sljit_s32 reg,
FAIL_IF(!inst);
INC_SIZE(2 + sizeof(sljit_sw));
*inst++ = REX_W | ((reg_map[reg] <= 7) ? 0 : REX_B);
- *inst++ = MOV_r_i32 + (reg_map[reg] & 0x7);
+ *inst++ = U8(MOV_r_i32 | (reg_map[reg] & 0x7));
sljit_unaligned_store_sw(inst, imm);
return SLJIT_SUCCESS;
}
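For orientation, emit_load_imm64() produces the 10-byte `mov r64, imm64` form: a REX prefix (REX.W, plus REX.B for r8-r15), opcode 0xB8 with the low three register bits OR-ed in, then the raw little-endian immediate. A minimal standalone sketch (hypothetical helper, not sljit code) that produces the same bytes:

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical standalone encoder mirroring emit_load_imm64();
       hw_reg is the hardware register number (0-15). */
    static size_t encode_mov_r64_imm64(uint8_t *buf, int hw_reg, int64_t imm)
    {
        buf[0] = (uint8_t)(0x48 | (hw_reg > 7 ? 0x01 : 0)); /* REX_W | (REX_B for r8-r15) */
        buf[1] = (uint8_t)(0xB8 | (hw_reg & 0x7));          /* MOV_r_i32 + low register bits */
        memcpy(buf + 2, &imm, sizeof(imm));                 /* 64-bit immediate */
        return 2 + sizeof(imm);
    }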
+static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm)
+{
+ sljit_u8 *inst;
+ sljit_uw length = (rex ? 2 : 1) + sizeof(sljit_s32);
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + length);
+ FAIL_IF(!inst);
+ INC_SIZE(length);
+ if (rex)
+ *inst++ = rex;
+ *inst++ = opcode;
+ sljit_unaligned_store_s32(inst, (sljit_s32)imm);
+ return SLJIT_SUCCESS;
+}
+
+static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_uw size,
+ /* The register or immediate operand. */
+ sljit_s32 a, sljit_sw imma,
+ /* The general operand (not immediate). */
+ sljit_s32 b, sljit_sw immb)
+{
+ sljit_u8 *inst;
+ sljit_u8 *buf_ptr;
+ sljit_u8 rex = 0;
+ sljit_u8 reg_lmap_b;
+ sljit_uw flags = size;
+ sljit_uw inst_size;
+
+ /* The immediate operand must be 32 bit. */
+ SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma));
+ /* Both cannot be switched on. */
+ SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
+ /* Size flags not allowed for typed instructions. */
+ SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
+ /* Both size flags cannot be switched on. */
+ SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
+ /* SSE2 and immediate is not possible. */
+ SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
+ SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
+ && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
+ && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));
+
+ size &= 0xf;
+ inst_size = size;
+
+ if (!compiler->mode32 && !(flags & EX86_NO_REXW))
+ rex |= REX_W;
+ else if (flags & EX86_REX)
+ rex |= REX;
+
+ if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
+ inst_size++;
+ if (flags & EX86_PREF_66)
+ inst_size++;
+
+ /* Calculate size of b. */
+ inst_size += 1; /* mod r/m byte. */
+ if (b & SLJIT_MEM) {
+ if (!(b & OFFS_REG_MASK)) {
+ if (NOT_HALFWORD(immb)) {
+ PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
+ immb = 0;
+ if (b & REG_MASK)
+ b |= TO_OFFS_REG(TMP_REG2);
+ else
+ b |= TMP_REG2;
+ }
+ else if (reg_lmap[b & REG_MASK] == 4)
+ b |= TO_OFFS_REG(SLJIT_SP);
+ }
+
+ if (!(b & REG_MASK))
+ inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */
+ else {
+ if (reg_map[b & REG_MASK] >= 8)
+ rex |= REX_B;
+
+ if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) {
+ /* Immediate operand. */
+ if (immb <= 127 && immb >= -128)
+ inst_size += sizeof(sljit_s8);
+ else
+ inst_size += sizeof(sljit_s32);
+ }
+ else if (reg_lmap[b & REG_MASK] == 5)
+ inst_size += sizeof(sljit_s8);
+
+ if (b & OFFS_REG_MASK) {
+ inst_size += 1; /* SIB byte. */
+ if (reg_map[OFFS_REG(b)] >= 8)
+ rex |= REX_X;
+ }
+ }
+ }
+ else if (!(flags & EX86_SSE2_OP2)) {
+ if (reg_map[b] >= 8)
+ rex |= REX_B;
+ }
+ else if (freg_map[b] >= 8)
+ rex |= REX_B;
+
+ if (a & SLJIT_IMM) {
+ if (flags & EX86_BIN_INS) {
+ if (imma <= 127 && imma >= -128) {
+ inst_size += 1;
+ flags |= EX86_BYTE_ARG;
+ } else
+ inst_size += 4;
+ }
+ else if (flags & EX86_SHIFT_INS) {
+ imma &= compiler->mode32 ? 0x1f : 0x3f;
+ if (imma != 1) {
+ inst_size ++;
+ flags |= EX86_BYTE_ARG;
+ }
+ } else if (flags & EX86_BYTE_ARG)
+ inst_size++;
+ else if (flags & EX86_HALF_ARG)
+ inst_size += sizeof(short);
+ else
+ inst_size += sizeof(sljit_s32);
+ }
+ else {
+ SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
+ /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
+ if (!(flags & EX86_SSE2_OP1)) {
+ if (reg_map[a] >= 8)
+ rex |= REX_R;
+ }
+ else if (freg_map[a] >= 8)
+ rex |= REX_R;
+ }
+
+ if (rex)
+ inst_size++;
+
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
+ PTR_FAIL_IF(!inst);
+
+ /* Encoding the byte. */
+ INC_SIZE(inst_size);
+ if (flags & EX86_PREF_F2)
+ *inst++ = 0xf2;
+ if (flags & EX86_PREF_F3)
+ *inst++ = 0xf3;
+ if (flags & EX86_PREF_66)
+ *inst++ = 0x66;
+ if (rex)
+ *inst++ = rex;
+ buf_ptr = inst + size;
+
+ /* Encode mod/rm byte. */
+ if (!(flags & EX86_SHIFT_INS)) {
+ if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
+ *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
+
+ if (a & SLJIT_IMM)
+ *buf_ptr = 0;
+ else if (!(flags & EX86_SSE2_OP1))
+ *buf_ptr = U8(reg_lmap[a] << 3);
+ else
+ *buf_ptr = U8(freg_lmap[a] << 3);
+ }
+ else {
+ if (a & SLJIT_IMM) {
+ if (imma == 1)
+ *inst = GROUP_SHIFT_1;
+ else
+ *inst = GROUP_SHIFT_N;
+ } else
+ *inst = GROUP_SHIFT_CL;
+ *buf_ptr = 0;
+ }
+
+ if (!(b & SLJIT_MEM)) {
+ *buf_ptr = U8(*buf_ptr | MOD_REG | (!(flags & EX86_SSE2_OP2) ? reg_lmap[b] : freg_lmap[b]));
+ buf_ptr++;
+ } else if (b & REG_MASK) {
+ reg_lmap_b = reg_lmap[b & REG_MASK];
+
+ if (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP) || reg_lmap_b == 5) {
+ if (immb != 0 || reg_lmap_b == 5) {
+ if (immb <= 127 && immb >= -128)
+ *buf_ptr |= 0x40;
+ else
+ *buf_ptr |= 0x80;
+ }
+
+ if (!(b & OFFS_REG_MASK))
+ *buf_ptr++ |= reg_lmap_b;
+ else {
+ *buf_ptr++ |= 0x04;
+ *buf_ptr++ = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3));
+ }
+
+ if (immb != 0 || reg_lmap_b == 5) {
+ if (immb <= 127 && immb >= -128)
+ *buf_ptr++ = U8(immb); /* 8 bit displacement. */
+ else {
+ sljit_unaligned_store_s32(buf_ptr, (sljit_s32)immb); /* 32 bit displacement. */
+ buf_ptr += sizeof(sljit_s32);
+ }
+ }
+ }
+ else {
+ *buf_ptr++ |= 0x04;
+ *buf_ptr++ = U8(reg_lmap_b | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6));
+ }
+ }
+ else {
+ *buf_ptr++ |= 0x04;
+ *buf_ptr++ = 0x25;
+ sljit_unaligned_store_s32(buf_ptr, (sljit_s32)immb); /* 32 bit displacement. */
+ buf_ptr += sizeof(sljit_s32);
+ }
+
+ if (a & SLJIT_IMM) {
+ if (flags & EX86_BYTE_ARG)
+ *buf_ptr = U8(imma);
+ else if (flags & EX86_HALF_ARG)
+ sljit_unaligned_store_s16(buf_ptr, (sljit_s16)imma);
+ else if (!(flags & EX86_SHIFT_INS))
+ sljit_unaligned_store_s32(buf_ptr, (sljit_s32)imma);
+ }
+
+ return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
+}
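The ModRM logic above packs the mod bits (0x40 = disp8, 0x80 = disp32), a reg field shifted left by three, and an r/m field; r/m value 4 forces a SIB byte, and a base whose lmap is 5 (rbp/r13) always needs a displacement, which is why those cases are special-cased. A simplified sketch (assumed helper) of the common base + disp8 case, with both registers below r8:

    #include <stdint.h>

    /* Hypothetical, simplified ModRM builder for "op reg, [base + disp8]";
       base must not be rsp/r12 (needs SIB) or rbp/r13 (special-cased above). */
    static uint8_t modrm_base_disp8(int reg, int base)
    {
        return (uint8_t)(0x40 | ((reg & 0x7) << 3) | (base & 0x7));
    }

    /* Example: "mov rax, [rcx + 8]" encodes as 48 8B 41 08, where
       0x41 == modrm_base_disp8(0, 1) with 0 = rax, 1 = rcx. */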
+
+/* --------------------------------------------------------------------- */
+/* Enter / return */
+/* --------------------------------------------------------------------- */
+
static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_ptr)
{
- sljit_s32 type = jump->flags >> TYPE_SHIFT;
+ sljit_uw type = jump->flags >> TYPE_SHIFT;
int short_addr = !(jump->flags & SLJIT_REWRITABLE_JUMP) && !(jump->flags & JUMP_LABEL) && (jump->u.target <= 0xffffffff);
@@ -50,7 +286,7 @@ static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_
if (type < SLJIT_JUMP) {
/* Invert type. */
- *code_ptr++ = get_jump_code(type ^ 0x1) - 0x10;
+ *code_ptr++ = U8(get_jump_code(type ^ 0x1) - 0x10);
*code_ptr++ = short_addr ? (6 + 3) : (10 + 3);
}
@@ -63,13 +299,13 @@ static sljit_u8* generate_far_jump_code(struct sljit_jump *jump, sljit_u8 *code_
else if (short_addr)
sljit_unaligned_store_s32(code_ptr, (sljit_s32)jump->u.target);
else
- sljit_unaligned_store_sw(code_ptr, jump->u.target);
+ sljit_unaligned_store_sw(code_ptr, (sljit_sw)jump->u.target);
code_ptr += short_addr ? sizeof(sljit_s32) : sizeof(sljit_sw);
*code_ptr++ = REX_B;
*code_ptr++ = GROUP_FF;
- *code_ptr++ = MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2];
+ *code_ptr++ = U8(MOD_REG | (type >= SLJIT_FAST_CALL ? CALL_rm : JMP_rm) | reg_lmap[TMP_REG2]);
return code_ptr;
}
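The skip counts `(6 + 3)` and `(10 + 3)` in the inverted branch are the byte sizes of what follows: a 6-byte `mov r32, imm32` (REX + 0xB8+r + imm32, zero-extending, used when short_addr holds) or a 10-byte `mov r64, imm64`, plus the 3-byte indirect jump or call (REX_B, 0xFF, ModRM). Roughly, the emitted sequence looks like this (register name illustrative; TMP_REG2's hardware mapping is defined elsewhere):

    /* Shape of the far-jump sequence built above:
     *
     *   j<!cc>   +9 or +13    ; inverted condition, conditional jumps only
     *   mov      r10, target  ; 6 bytes (imm32 form) or 10 bytes (imm64 form)
     *   jmp/call r10          ; 3 bytes: REX_B, FF /4 (or FF /2 for call)
     */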
@@ -90,7 +326,7 @@ static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, slji
SLJIT_ASSERT((code_ptr[1] & 0xf8) == MOV_r_i32);
if ((code_ptr[0] & 0x07) != 0) {
- code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x08);
+ code_ptr[0] = U8(code_ptr[0] & ~0x08);
code_ptr += 2 + sizeof(sljit_s32);
}
else {
@@ -114,9 +350,9 @@ static sljit_u8* generate_put_label_code(struct sljit_put_label *put_label, slji
SLJIT_ASSERT(code_ptr[1] == MOV_rm_r);
- code_ptr[0] = (sljit_u8)(code_ptr[0] & ~0x4);
+ code_ptr[0] = U8(code_ptr[0] & ~0x4);
code_ptr[1] = MOV_rm_i32;
- code_ptr[2] = (sljit_u8)(code_ptr[2] & ~(0x7 << 3));
+ code_ptr[2] = U8(code_ptr[2] & ~(0x7 << 3));
code_ptr = (sljit_u8*)(put_label->addr - (2 + sizeof(sljit_uw)) + sizeof(sljit_s32));
put_label->addr = (sljit_uw)code_ptr;
@@ -128,7 +364,15 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 args, i, tmp, size, saved_register_size;
+ sljit_uw size;
+ sljit_s32 word_arg_count = 0;
+ sljit_s32 saved_arg_count = 0;
+ sljit_s32 saved_regs_size, tmp, i;
+#ifdef _WIN64
+ sljit_s32 saved_float_regs_size;
+ sljit_s32 saved_float_regs_offset = 0;
+ sljit_s32 float_arg_count = 0;
+#endif /* _WIN64 */
sljit_u8 *inst;
CHECK_ERROR();
@@ -140,19 +384,11 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
compiler->mode32 = 0;
-#ifdef _WIN64
- /* Two/four register slots for parameters plus space for xmm6 register if needed. */
- if (fscratches >= 6 || fsaveds >= 1)
- compiler->locals_offset = 6 * sizeof(sljit_sw);
- else
- compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
-#endif
-
/* Including the return address saved by the call instruction. */
- saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
+ saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
- tmp = saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - saveds) : SLJIT_FIRST_SAVED_REG;
- for (i = SLJIT_S0; i >= tmp; i--) {
+ tmp = SLJIT_S0 - saveds;
+ for (i = SLJIT_S0; i > tmp; i--) {
size = reg_map[i] >= 8 ? 2 : 1;
inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
FAIL_IF(!inst);
@@ -172,55 +408,75 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
PUSH_REG(reg_lmap[i]);
}
- args = get_arg_count(arg_types);
+#ifdef _WIN64
+ local_size += SLJIT_LOCALS_OFFSET;
+ saved_float_regs_size = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, 16);
- if (args > 0) {
- size = args * 3;
- inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
- FAIL_IF(!inst);
+ if (saved_float_regs_size > 0) {
+ saved_float_regs_offset = ((local_size + 0xf) & ~0xf);
+ local_size = saved_float_regs_offset + saved_float_regs_size;
+ }
+#else /* !_WIN64 */
+ SLJIT_ASSERT(SLJIT_LOCALS_OFFSET == 0);
+#endif /* _WIN64 */
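The `(x + 0xf) & ~0xf` idiom above rounds the xmm save area up to a 16-byte boundary, which matters because the MOVAPS used below faults on unaligned addresses. A tiny self-contained check of the idiom:

    #include <assert.h>

    static int align_up_16(int x) { return (x + 0xf) & ~0xf; }

    int main(void)
    {
        assert(align_up_16(37) == 48);   /* rounds up */
        assert(align_up_16(48) == 48);   /* already aligned */
        return 0;
    }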
- INC_SIZE(size);
+ arg_types >>= SLJIT_ARG_SHIFT;
+ while (arg_types > 0) {
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64) {
+ tmp = 0;
#ifndef _WIN64
- if (args > 0) {
- inst[0] = REX_W;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x7 /* rdi */;
- inst += 3;
- }
- if (args > 1) {
- inst[0] = REX_W | REX_R;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_lmap[SLJIT_S1] << 3) | 0x6 /* rsi */;
- inst += 3;
- }
- if (args > 2) {
- inst[0] = REX_W | REX_R;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_lmap[SLJIT_S2] << 3) | 0x2 /* rdx */;
- }
-#else
- if (args > 0) {
- inst[0] = REX_W;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S0] << 3) | 0x1 /* rcx */;
- inst += 3;
- }
- if (args > 1) {
- inst[0] = REX_W;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S1] << 3) | 0x2 /* rdx */;
- inst += 3;
- }
- if (args > 2) {
- inst[0] = REX_W | REX_B;
- inst[1] = MOV_r_rm;
- inst[2] = MOD_REG | (reg_map[SLJIT_S2] << 3) | 0x0 /* r8 */;
+ switch (word_arg_count) {
+ case 0:
+ tmp = SLJIT_R2;
+ break;
+ case 1:
+ tmp = SLJIT_R1;
+ break;
+ case 2:
+ tmp = TMP_REG1;
+ break;
+ default:
+ tmp = SLJIT_R3;
+ break;
+ }
+#else /* !_WIN64 */
+ switch (word_arg_count + float_arg_count) {
+ case 0:
+ tmp = SLJIT_R3;
+ break;
+ case 1:
+ tmp = SLJIT_R1;
+ break;
+ case 2:
+ tmp = SLJIT_R2;
+ break;
+ default:
+ tmp = TMP_REG1;
+ break;
+ }
+#endif /* _WIN64 */
+ if (arg_types & SLJIT_ARG_TYPE_SCRATCH_REG) {
+ if (tmp != SLJIT_R0 + word_arg_count)
+ EMIT_MOV(compiler, SLJIT_R0 + word_arg_count, 0, tmp, 0);
+ } else {
+ EMIT_MOV(compiler, SLJIT_S0 - saved_arg_count, 0, tmp, 0);
+ saved_arg_count++;
+ }
+ word_arg_count++;
+ } else {
+#ifdef _WIN64
+ SLJIT_COMPILE_ASSERT(SLJIT_FR0 == 1, float_register_index_start);
+ float_arg_count++;
+ if (float_arg_count != float_arg_count + word_arg_count)
+ FAIL_IF(emit_sse2_load(compiler, (arg_types & SLJIT_ARG_MASK) == SLJIT_ARG_TYPE_F32,
+ float_arg_count, float_arg_count + word_arg_count, 0));
+#endif /* _WIN64 */
}
-#endif
+ arg_types >>= SLJIT_ARG_SHIFT;
}
- local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
+ local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size;
compiler->local_size = local_size;
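The rounding above keeps rsp 16-byte aligned without a separate AND: saved_regs_size already includes the return address, so aligning the combined size and subtracting saved_regs_size back yields the needed local adjustment. A worked instance:

    /* Frame-size example: two saved registers plus the return address occupy
       saved_regs_size == 3 * 8 == 24 bytes.  A requested local_size of 30
       becomes ((30 + 24 + 0xf) & ~0xf) - 24 == 64 - 24 == 40, so pushes plus
       locals total 64 bytes and rsp stays 16-byte aligned in the body. */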
#ifdef _WIN64
@@ -234,44 +490,49 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_enter(struct sljit_compiler *compi
EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -4096 * 3);
}
else {
- EMIT_MOV(compiler, SLJIT_R0, 0, SLJIT_SP, 0);
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, (local_size - 1) >> 12);
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, local_size >> 12);
- SLJIT_ASSERT (reg_map[SLJIT_R0] == 0);
-
- EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_R0), -4096);
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_R0, 0, SLJIT_R0, 0, SLJIT_IMM, 4096));
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- TMP_REG1, 0, TMP_REG1, 0, SLJIT_IMM, 1));
+ EMIT_MOV(compiler, TMP_REG2, 0, SLJIT_MEM1(SLJIT_SP), -4096);
+ BINARY_IMM32(SUB, 4096, SLJIT_SP, 0);
+ BINARY_IMM32(SUB, 1, TMP_REG1, 0);
inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
FAIL_IF(!inst);
INC_SIZE(2);
inst[0] = JNE_i8;
- inst[1] = (sljit_s8) -19;
+ inst[1] = (sljit_u8)-21;
+ local_size &= 0xfff;
}
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
+ if (local_size > 0)
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(SLJIT_SP), -local_size);
}
-#endif
+#endif /* _WIN64 */
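On Windows the stack grows one guard page at a time, so large frames must touch each page in order; that is what the counted probe loop above does, with `local_size &= 0xfff` leaving only the sub-page remainder for the final adjustment. The backward branch displacement changes from -19 to -21 because the rewritten loop body, which works on rsp directly instead of a copy in SLJIT_R0, is two bytes longer. A rough C rendering of the emitted loop:

    /* Rough C rendering of the probe loop emitted above (local_size known
       to exceed the cushion; each load commits one guard page). */
    static void probe_stack_pages(volatile char *sp, long local_size)
    {
        long pages = local_size >> 12;     /* mov TMP_REG1, local_size >> 12 */
        do {
            (void)sp[-4096];               /* mov TMP_REG2, [rsp - 4096] */
            sp -= 4096;                    /* sub rsp, 4096 */
        } while (--pages != 0);            /* sub TMP_REG1, 1 ; jne (-21) */
    }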
- if (local_size > 0) {
- FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB),
- SLJIT_SP, 0, SLJIT_SP, 0, SLJIT_IMM, local_size));
- }
+ if (local_size > 0)
+ BINARY_IMM32(SUB, local_size, SLJIT_SP, 0);
#ifdef _WIN64
- /* Save xmm6 register: movaps [rsp + 0x20], xmm6 */
- if (fscratches >= 6 || fsaveds >= 1) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
- FAIL_IF(!inst);
- INC_SIZE(5);
- *inst++ = GROUP_0F;
- sljit_unaligned_store_s32(inst, 0x20247429);
+ if (saved_float_regs_size > 0) {
+ compiler->mode32 = 1;
+
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
+ *inst++ = GROUP_0F;
+ *inst = MOVAPS_xm_x;
+ saved_float_regs_offset += 16;
+ }
+
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
+ *inst++ = GROUP_0F;
+ *inst = MOVAPS_xm_x;
+ saved_float_regs_offset += 16;
+ }
}
-#endif
+#endif /* _WIN64 */
return SLJIT_SUCCESS;
}
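The magic constant dropped above is worth decoding: 0x20247429 stored little-endian is the byte sequence 29 74 24 20, which together with the preceding GROUP_0F is exactly `movaps [rsp + 0x20], xmm6` (and 0x20247428 in the old return path was the matching load). The new loops emit the same MOVAPS through emit_x86_instruction() for every saved xmm register instead of xmm6 alone.

    /* Old hard-wired save/restore, decoded:
     *   0F 29 74 24 20   movaps [rsp + 0x20], xmm6   (store, from 0x20247429)
     *   0F 28 74 24 20   movaps xmm6, [rsp + 0x20]   (load,  from 0x20247428)
     */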
@@ -280,46 +541,65 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_set_context(struct sljit_compiler *comp
sljit_s32 options, sljit_s32 arg_types, sljit_s32 scratches, sljit_s32 saveds,
sljit_s32 fscratches, sljit_s32 fsaveds, sljit_s32 local_size)
{
- sljit_s32 saved_register_size;
+ sljit_s32 saved_regs_size;
+#ifdef _WIN64
+ sljit_s32 saved_float_regs_size;
+#endif /* _WIN64 */
CHECK_ERROR();
CHECK(check_sljit_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size));
set_set_context(compiler, options, arg_types, scratches, saveds, fscratches, fsaveds, local_size);
#ifdef _WIN64
- /* Two/four register slots for parameters plus space for xmm6 register if needed. */
- if (fscratches >= 6 || fsaveds >= 1)
- compiler->locals_offset = 6 * sizeof(sljit_sw);
- else
- compiler->locals_offset = ((scratches > 2) ? 4 : 2) * sizeof(sljit_sw);
-#endif
+ local_size += SLJIT_LOCALS_OFFSET;
+ saved_float_regs_size = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, 16);
+
+ if (saved_float_regs_size > 0)
+ local_size = ((local_size + 0xf) & ~0xf) + saved_float_regs_size;
+#else /* !_WIN64 */
+ SLJIT_ASSERT(SLJIT_LOCALS_OFFSET == 0);
+#endif /* _WIN64 */
/* Including the return address saved by the call instruction. */
- saved_register_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
- compiler->local_size = ((local_size + SLJIT_LOCALS_OFFSET + saved_register_size + 15) & ~15) - saved_register_size;
+ saved_regs_size = GET_SAVED_REGISTERS_SIZE(scratches, saveds, 1);
+ compiler->local_size = ((local_size + saved_regs_size + 0xf) & ~0xf) - saved_regs_size;
return SLJIT_SUCCESS;
}
-SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *compiler, sljit_s32 op, sljit_s32 src, sljit_sw srcw)
+static sljit_s32 emit_stack_frame_release(struct sljit_compiler *compiler)
{
- sljit_s32 i, tmp, size;
+ sljit_uw size;
+ sljit_s32 i, tmp;
sljit_u8 *inst;
+#ifdef _WIN64
+ sljit_s32 saved_float_regs_offset;
+ sljit_s32 fscratches = compiler->fscratches;
+ sljit_s32 fsaveds = compiler->fsaveds;
+#endif /* _WIN64 */
- CHECK_ERROR();
- CHECK(check_sljit_emit_return(compiler, op, src, srcw));
+#ifdef _WIN64
+ saved_float_regs_offset = GET_SAVED_FLOAT_REGISTERS_SIZE(fscratches, fsaveds, 16);
- FAIL_IF(emit_mov_before_return(compiler, op, src, srcw));
+ if (saved_float_regs_offset > 0) {
+ compiler->mode32 = 1;
+ saved_float_regs_offset = (compiler->local_size - saved_float_regs_offset) & ~0xf;
+
+ tmp = SLJIT_FS0 - fsaveds;
+ for (i = SLJIT_FS0; i > tmp; i--) {
+ inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
+ *inst++ = GROUP_0F;
+ *inst = MOVAPS_x_xm;
+ saved_float_regs_offset += 16;
+ }
-#ifdef _WIN64
- /* Restore xmm6 register: movaps xmm6, [rsp + 0x20] */
- if (compiler->fscratches >= 6 || compiler->fsaveds >= 1) {
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 5);
- FAIL_IF(!inst);
- INC_SIZE(5);
- *inst++ = GROUP_0F;
- sljit_unaligned_store_s32(inst, 0x20247428);
+ for (i = fscratches; i >= SLJIT_FIRST_SAVED_FLOAT_REG; i--) {
+ inst = emit_x86_instruction(compiler, 2 | EX86_SSE2, i, 0, SLJIT_MEM1(SLJIT_SP), saved_float_regs_offset);
+ *inst++ = GROUP_0F;
+ *inst = MOVAPS_x_xm;
+ saved_float_regs_offset += 16;
+ }
}
-#endif
+#endif /* _WIN64 */
if (compiler->local_size > 0) {
if (compiler->local_size <= 127) {
@@ -329,7 +609,7 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
*inst++ = REX_W;
*inst++ = GROUP_BINARY_83;
*inst++ = MOD_REG | ADD | 4;
- *inst = compiler->local_size;
+ *inst = U8(compiler->local_size);
}
else {
inst = (sljit_u8*)ensure_buf(compiler, 1 + 7);
@@ -364,243 +644,23 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return(struct sljit_compiler *comp
POP_REG(reg_lmap[i]);
}
- inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
- FAIL_IF(!inst);
- INC_SIZE(1);
- RET();
- return SLJIT_SUCCESS;
-}
-
-/* --------------------------------------------------------------------- */
-/* Operators */
-/* --------------------------------------------------------------------- */
-
-static sljit_s32 emit_do_imm32(struct sljit_compiler *compiler, sljit_u8 rex, sljit_u8 opcode, sljit_sw imm)
-{
- sljit_u8 *inst;
- sljit_s32 length = 1 + (rex ? 1 : 0) + sizeof(sljit_s32);
-
- inst = (sljit_u8*)ensure_buf(compiler, 1 + length);
- FAIL_IF(!inst);
- INC_SIZE(length);
- if (rex)
- *inst++ = rex;
- *inst++ = opcode;
- sljit_unaligned_store_s32(inst, imm);
return SLJIT_SUCCESS;
}
-static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32 size,
- /* The register or immediate operand. */
- sljit_s32 a, sljit_sw imma,
- /* The general operand (not immediate). */
- sljit_s32 b, sljit_sw immb)
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_return_void(struct sljit_compiler *compiler)
{
sljit_u8 *inst;
- sljit_u8 *buf_ptr;
- sljit_u8 rex = 0;
- sljit_s32 flags = size & ~0xf;
- sljit_s32 inst_size;
-
- /* The immediate operand must be 32 bit. */
- SLJIT_ASSERT(!(a & SLJIT_IMM) || compiler->mode32 || IS_HALFWORD(imma));
- /* Both cannot be switched on. */
- SLJIT_ASSERT((flags & (EX86_BIN_INS | EX86_SHIFT_INS)) != (EX86_BIN_INS | EX86_SHIFT_INS));
- /* Size flags not allowed for typed instructions. */
- SLJIT_ASSERT(!(flags & (EX86_BIN_INS | EX86_SHIFT_INS)) || (flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) == 0);
- /* Both size flags cannot be switched on. */
- SLJIT_ASSERT((flags & (EX86_BYTE_ARG | EX86_HALF_ARG)) != (EX86_BYTE_ARG | EX86_HALF_ARG));
- /* SSE2 and immediate is not possible. */
- SLJIT_ASSERT(!(a & SLJIT_IMM) || !(flags & EX86_SSE2));
- SLJIT_ASSERT((flags & (EX86_PREF_F2 | EX86_PREF_F3)) != (EX86_PREF_F2 | EX86_PREF_F3)
- && (flags & (EX86_PREF_F2 | EX86_PREF_66)) != (EX86_PREF_F2 | EX86_PREF_66)
- && (flags & (EX86_PREF_F3 | EX86_PREF_66)) != (EX86_PREF_F3 | EX86_PREF_66));
-
- size &= 0xf;
- inst_size = size;
-
- if (!compiler->mode32 && !(flags & EX86_NO_REXW))
- rex |= REX_W;
- else if (flags & EX86_REX)
- rex |= REX;
-
- if (flags & (EX86_PREF_F2 | EX86_PREF_F3))
- inst_size++;
- if (flags & EX86_PREF_66)
- inst_size++;
-
- /* Calculate size of b. */
- inst_size += 1; /* mod r/m byte. */
- if (b & SLJIT_MEM) {
- if (!(b & OFFS_REG_MASK)) {
- if (NOT_HALFWORD(immb)) {
- PTR_FAIL_IF(emit_load_imm64(compiler, TMP_REG2, immb));
- immb = 0;
- if (b & REG_MASK)
- b |= TO_OFFS_REG(TMP_REG2);
- else
- b |= TMP_REG2;
- }
- else if (reg_lmap[b & REG_MASK] == 4)
- b |= TO_OFFS_REG(SLJIT_SP);
- }
-
- if ((b & REG_MASK) == SLJIT_UNUSED)
- inst_size += 1 + sizeof(sljit_s32); /* SIB byte required to avoid RIP based addressing. */
- else {
- if (reg_map[b & REG_MASK] >= 8)
- rex |= REX_B;
-
- if (immb != 0 && (!(b & OFFS_REG_MASK) || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP))) {
- /* Immediate operand. */
- if (immb <= 127 && immb >= -128)
- inst_size += sizeof(sljit_s8);
- else
- inst_size += sizeof(sljit_s32);
- }
- else if (reg_lmap[b & REG_MASK] == 5)
- inst_size += sizeof(sljit_s8);
-
- if ((b & OFFS_REG_MASK) != SLJIT_UNUSED) {
- inst_size += 1; /* SIB byte. */
- if (reg_map[OFFS_REG(b)] >= 8)
- rex |= REX_X;
- }
- }
- }
- else if (!(flags & EX86_SSE2_OP2)) {
- if (reg_map[b] >= 8)
- rex |= REX_B;
- }
- else if (freg_map[b] >= 8)
- rex |= REX_B;
-
- if (a & SLJIT_IMM) {
- if (flags & EX86_BIN_INS) {
- if (imma <= 127 && imma >= -128) {
- inst_size += 1;
- flags |= EX86_BYTE_ARG;
- } else
- inst_size += 4;
- }
- else if (flags & EX86_SHIFT_INS) {
- imma &= compiler->mode32 ? 0x1f : 0x3f;
- if (imma != 1) {
- inst_size ++;
- flags |= EX86_BYTE_ARG;
- }
- } else if (flags & EX86_BYTE_ARG)
- inst_size++;
- else if (flags & EX86_HALF_ARG)
- inst_size += sizeof(short);
- else
- inst_size += sizeof(sljit_s32);
- }
- else {
- SLJIT_ASSERT(!(flags & EX86_SHIFT_INS) || a == SLJIT_PREF_SHIFT_REG);
- /* reg_map[SLJIT_PREF_SHIFT_REG] is less than 8. */
- if (!(flags & EX86_SSE2_OP1)) {
- if (reg_map[a] >= 8)
- rex |= REX_R;
- }
- else if (freg_map[a] >= 8)
- rex |= REX_R;
- }
-
- if (rex)
- inst_size++;
-
- inst = (sljit_u8*)ensure_buf(compiler, 1 + inst_size);
- PTR_FAIL_IF(!inst);
-
- /* Encoding the byte. */
- INC_SIZE(inst_size);
- if (flags & EX86_PREF_F2)
- *inst++ = 0xf2;
- if (flags & EX86_PREF_F3)
- *inst++ = 0xf3;
- if (flags & EX86_PREF_66)
- *inst++ = 0x66;
- if (rex)
- *inst++ = rex;
- buf_ptr = inst + size;
-
- /* Encode mod/rm byte. */
- if (!(flags & EX86_SHIFT_INS)) {
- if ((flags & EX86_BIN_INS) && (a & SLJIT_IMM))
- *inst = (flags & EX86_BYTE_ARG) ? GROUP_BINARY_83 : GROUP_BINARY_81;
- if (a & SLJIT_IMM)
- *buf_ptr = 0;
- else if (!(flags & EX86_SSE2_OP1))
- *buf_ptr = reg_lmap[a] << 3;
- else
- *buf_ptr = freg_lmap[a] << 3;
- }
- else {
- if (a & SLJIT_IMM) {
- if (imma == 1)
- *inst = GROUP_SHIFT_1;
- else
- *inst = GROUP_SHIFT_N;
- } else
- *inst = GROUP_SHIFT_CL;
- *buf_ptr = 0;
- }
-
- if (!(b & SLJIT_MEM))
- *buf_ptr++ |= MOD_REG + ((!(flags & EX86_SSE2_OP2)) ? reg_lmap[b] : freg_lmap[b]);
- else if ((b & REG_MASK) != SLJIT_UNUSED) {
- if ((b & OFFS_REG_MASK) == SLJIT_UNUSED || (b & OFFS_REG_MASK) == TO_OFFS_REG(SLJIT_SP)) {
- if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
- if (immb <= 127 && immb >= -128)
- *buf_ptr |= 0x40;
- else
- *buf_ptr |= 0x80;
- }
-
- if ((b & OFFS_REG_MASK) == SLJIT_UNUSED)
- *buf_ptr++ |= reg_lmap[b & REG_MASK];
- else {
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3);
- }
-
- if (immb != 0 || reg_lmap[b & REG_MASK] == 5) {
- if (immb <= 127 && immb >= -128)
- *buf_ptr++ = immb; /* 8 bit displacement. */
- else {
- sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
- buf_ptr += sizeof(sljit_s32);
- }
- }
- }
- else {
- if (reg_lmap[b & REG_MASK] == 5)
- *buf_ptr |= 0x40;
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = reg_lmap[b & REG_MASK] | (reg_lmap[OFFS_REG(b)] << 3) | (immb << 6);
- if (reg_lmap[b & REG_MASK] == 5)
- *buf_ptr++ = 0;
- }
- }
- else {
- *buf_ptr++ |= 0x04;
- *buf_ptr++ = 0x25;
- sljit_unaligned_store_s32(buf_ptr, immb); /* 32 bit displacement. */
- buf_ptr += sizeof(sljit_s32);
- }
+ CHECK_ERROR();
+ CHECK(check_sljit_emit_return_void(compiler));
- if (a & SLJIT_IMM) {
- if (flags & EX86_BYTE_ARG)
- *buf_ptr = imma;
- else if (flags & EX86_HALF_ARG)
- sljit_unaligned_store_s16(buf_ptr, imma);
- else if (!(flags & EX86_SHIFT_INS))
- sljit_unaligned_store_s32(buf_ptr, imma);
- }
+ FAIL_IF(emit_stack_frame_release(compiler));
- return !(flags & EX86_SHIFT_INS) ? inst : (inst + 1);
+ inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
+ FAIL_IF(!inst);
+ INC_SIZE(1);
+ RET();
+ return SLJIT_SUCCESS;
}
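sljit_emit_return_void() is now just emit_stack_frame_release() plus a one-byte RET (0xC3); splitting out the teardown lets the tail-call paths below reuse it. The emitted epilogue has this shape (the exact register set depends on scratches/saveds):

    /* Epilogue produced by emit_stack_frame_release() + RET:
     *
     *   movaps xmmN, [rsp + off]   ; _WIN64 only, one per saved float register
     *   add    rsp, local_size     ; imm8 (GROUP_BINARY_83) or imm32 form
     *   pop    <saved registers>   ; reverse order of the prologue pushes
     *   ret                        ; 0xC3
     */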
/* --------------------------------------------------------------------- */
@@ -609,43 +669,38 @@ static sljit_u8* emit_x86_instruction(struct sljit_compiler *compiler, sljit_s32
#ifndef _WIN64
-static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr)
{
sljit_s32 src = src_ptr ? (*src_ptr) : 0;
sljit_s32 word_arg_count = 0;
SLJIT_ASSERT(reg_map[SLJIT_R1] == 6 && reg_map[SLJIT_R3] == 1 && reg_map[TMP_REG1] == 2);
-
- compiler->mode32 = 0;
+ SLJIT_ASSERT(!(src & SLJIT_MEM));
/* Remove return value. */
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- if ((arg_types & SLJIT_DEF_MASK) < SLJIT_ARG_TYPE_F32)
+ if ((arg_types & SLJIT_ARG_MASK) < SLJIT_ARG_TYPE_F64)
word_arg_count++;
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
if (word_arg_count == 0)
return SLJIT_SUCCESS;
- if (src & SLJIT_MEM) {
- ADJUST_LOCAL_OFFSET(src, srcw);
- EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
- *src_ptr = TMP_REG2;
+ if (word_arg_count >= 3) {
+ if (src == SLJIT_R2)
+ *src_ptr = TMP_REG1;
+ EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
}
- else if (src == SLJIT_R2 && word_arg_count >= SLJIT_R2)
- *src_ptr = TMP_REG1;
- if (word_arg_count >= 3)
- EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_R2, 0);
return emit_mov(compiler, SLJIT_R2, 0, SLJIT_R0, 0);
}
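The System V shuffle above relies on the mapping pinned by the assert (SLJIT_R1 = rsi, SLJIT_R3 = rcx, TMP_REG1 = rdx; SLJIT_R0 is rax, and SLJIT_R2 is evidently rdi given the final move). Copying SLJIT_R2 into TMP_REG1 before the final move matters because that move overwrites rdi, which is both SLJIT_R2 and the first argument register:

    /* Value flow for a three-word-argument call on System V x86-64:
     *
     *   SLJIT_R2 (rdi) -> TMP_REG1 (rdx)   ; 3rd argument, saved first so the
     *                                      ; next move cannot clobber it
     *   SLJIT_R0 (rax) -> SLJIT_R2 (rdi)   ; 1st argument
     *   SLJIT_R1 (rsi)                     ; already in place, 2nd argument
     *   SLJIT_R3 (rcx)                     ; already in place, 4th argument
     */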
#else
-static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr, sljit_sw srcw)
+static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_types, sljit_s32 *src_ptr)
{
sljit_s32 src = src_ptr ? (*src_ptr) : 0;
sljit_s32 arg_count = 0;
@@ -656,16 +711,16 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
static sljit_u8 word_arg_regs[5] = { 0, SLJIT_R3, SLJIT_R1, SLJIT_R2, TMP_REG1 };
SLJIT_ASSERT(reg_map[SLJIT_R3] == 1 && reg_map[SLJIT_R1] == 2 && reg_map[SLJIT_R2] == 8 && reg_map[TMP_REG1] == 9);
+ SLJIT_ASSERT(!(src & SLJIT_MEM));
- compiler->mode32 = 0;
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
while (arg_types) {
- types = (types << SLJIT_DEF_SHIFT) | (arg_types & SLJIT_DEF_MASK);
+ types = (types << SLJIT_ARG_SHIFT) | (arg_types & SLJIT_ARG_MASK);
- switch (arg_types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (arg_types & SLJIT_ARG_MASK) {
case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
arg_count++;
float_arg_count++;
@@ -687,29 +742,23 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- arg_types >>= SLJIT_DEF_SHIFT;
+ arg_types >>= SLJIT_ARG_SHIFT;
}
if (!data_trandfer)
return SLJIT_SUCCESS;
- if (src & SLJIT_MEM) {
- ADJUST_LOCAL_OFFSET(src, srcw);
- EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
- *src_ptr = TMP_REG2;
- }
-
while (types) {
- switch (types & SLJIT_DEF_MASK) {
- case SLJIT_ARG_TYPE_F32:
+ switch (types & SLJIT_ARG_MASK) {
+ case SLJIT_ARG_TYPE_F64:
if (arg_count != float_arg_count)
- FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
+ FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
arg_count--;
float_arg_count--;
break;
- case SLJIT_ARG_TYPE_F64:
+ case SLJIT_ARG_TYPE_F32:
if (arg_count != float_arg_count)
- FAIL_IF(emit_sse2_load(compiler, 0, arg_count, float_arg_count, 0));
+ FAIL_IF(emit_sse2_load(compiler, 1, arg_count, float_arg_count, 0));
arg_count--;
float_arg_count--;
break;
@@ -721,7 +770,7 @@ static sljit_s32 call_with_args(struct sljit_compiler *compiler, sljit_s32 arg_t
break;
}
- types >>= SLJIT_DEF_SHIFT;
+ types >>= SLJIT_ARG_SHIFT;
}
return SLJIT_SUCCESS;
@@ -735,13 +784,19 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_jump* sljit_emit_call(struct sljit_compile
CHECK_ERROR_PTR();
CHECK_PTR(check_sljit_emit_call(compiler, type, arg_types));
- PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL, 0));
+ compiler->mode32 = 0;
+
+ PTR_FAIL_IF(call_with_args(compiler, arg_types, NULL));
+
+ if (type & SLJIT_CALL_RETURN) {
+ PTR_FAIL_IF(emit_stack_frame_release(compiler));
+ type = SLJIT_JUMP | (type & SLJIT_REWRITABLE_JUMP);
+ }
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
compiler->skip_checks = 1;
#endif
-
return sljit_emit_jump(compiler, type);
}
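SLJIT_CALL_RETURN turns the call into a tail call: the frame is released first and the emitted instruction becomes a plain jump, so the callee returns directly to this function's caller. An illustrative C equivalent (hypothetical functions, not sljit API):

    extern long callee(long x);

    /* What the generated code behaves like when SLJIT_CALL_RETURN is set:
       frame teardown, then "jmp callee" - no new return address is pushed. */
    long caller_tail(long x)
    {
        return callee(x);
    }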
@@ -752,7 +807,25 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_icall(struct sljit_compiler *compi
CHECK_ERROR();
CHECK(check_sljit_emit_icall(compiler, type, arg_types, src, srcw));
- FAIL_IF(call_with_args(compiler, arg_types, &src, srcw));
+ compiler->mode32 = 0;
+
+ if (src & SLJIT_MEM) {
+ ADJUST_LOCAL_OFFSET(src, srcw);
+ EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+ src = TMP_REG2;
+ }
+
+ if (type & SLJIT_CALL_RETURN) {
+ if (src >= SLJIT_FIRST_SAVED_REG && src <= SLJIT_S0) {
+ EMIT_MOV(compiler, TMP_REG2, 0, src, srcw);
+ src = TMP_REG2;
+ }
+
+ FAIL_IF(emit_stack_frame_release(compiler));
+ type = SLJIT_JUMP;
+ }
+
+ FAIL_IF(call_with_args(compiler, arg_types, &src));
#if (defined SLJIT_VERBOSE && SLJIT_VERBOSE) \
|| (defined SLJIT_ARGUMENT_CHECKS && SLJIT_ARGUMENT_CHECKS)
@@ -770,10 +843,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_fast_enter(struct sljit_compiler *
CHECK(check_sljit_emit_fast_enter(compiler, dst, dstw));
ADJUST_LOCAL_OFFSET(dst, dstw);
- /* For UNUSED dst. Uncommon, but possible. */
- if (dst == SLJIT_UNUSED)
- dst = TMP_REG1;
-
if (FAST_IS_REG(dst)) {
if (reg_map[dst] < 8) {
inst = (sljit_u8*)ensure_buf(compiler, 1 + 1);
@@ -850,9 +919,6 @@ static sljit_s32 emit_mov_int(struct sljit_compiler *compiler, sljit_s32 sign,
compiler->mode32 = 0;
- if (dst == SLJIT_UNUSED && !(src & SLJIT_MEM))
- return SLJIT_SUCCESS; /* Empty instruction. */
-
if (src & SLJIT_IMM) {
if (FAST_IS_REG(dst)) {
if (sign || ((sljit_uw)srcw <= 0x7fffffff)) {
@@ -903,16 +969,16 @@ static sljit_s32 skip_frames_before_return(struct sljit_compiler *compiler)
sljit_s32 tmp, size;
/* Don't adjust shadow stack if it isn't enabled. */
- if (!cpu_has_shadow_stack ())
+ if (!cpu_has_shadow_stack())
return SLJIT_SUCCESS;
size = compiler->local_size;
tmp = compiler->scratches;
if (tmp >= SLJIT_FIRST_SAVED_REG)
- size += (tmp - SLJIT_FIRST_SAVED_REG + 1) * sizeof(sljit_uw);
+ size += (tmp - SLJIT_FIRST_SAVED_REG + 1) * SSIZE_OF(sw);
tmp = compiler->saveds < SLJIT_NUMBER_OF_SAVED_REGISTERS ? (SLJIT_S0 + 1 - compiler->saveds) : SLJIT_FIRST_SAVED_REG;
if (SLJIT_S0 >= tmp)
- size += (SLJIT_S0 - tmp + 1) * sizeof(sljit_uw);
+ size += (SLJIT_S0 - tmp + 1) * SSIZE_OF(sw);
- return adjust_shadow_stack(compiler, SLJIT_UNUSED, 0, SLJIT_SP, size);
+ return adjust_shadow_stack(compiler, SLJIT_MEM1(SLJIT_SP), size);
}
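skip_frames_before_return() supports shadow-stack hardware (Intel CET): before a tail call releases the frame, it computes how far below the saved return address rsp currently sits (locals plus every register slot the epilogue will pop) and passes SLJIT_MEM1(SLJIT_SP) with that size to adjust_shadow_stack(). A worked instance of the size computation, assuming three saved scratches and two saveds:

    /* size == local_size
     *       + 3 * SSIZE_OF(sw)   ; scratches at or above SLJIT_FIRST_SAVED_REG
     *       + 2 * SSIZE_OF(sw)   ; saved registers
     * e.g. 32 + 24 + 16 == 72 -> the return address sits at [rsp + 72]. */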