Diffstat (limited to 'thirdparty/pcre2/src/sljit/sljitNativeX86_common.c')
-rw-r--r-- | thirdparty/pcre2/src/sljit/sljitNativeX86_common.c | 230 |
1 file changed, 216 insertions, 14 deletions
diff --git a/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c b/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
index 6296da5382..ddcc5ebf76 100644
--- a/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
+++ b/thirdparty/pcre2/src/sljit/sljitNativeX86_common.c
@@ -506,7 +506,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
 	reverse_buf(compiler);
 
 	/* Second code generation pass. */
-	code = (sljit_u8*)SLJIT_MALLOC_EXEC(compiler->size);
+	code = (sljit_u8*)SLJIT_MALLOC_EXEC(compiler->size, compiler->exec_allocator_data);
 	PTR_FAIL_WITH_EXEC_IF(code);
 	buf = compiler->buf;
 
@@ -557,7 +557,7 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
 			SLJIT_ASSERT(put_label->label);
 			put_label->addr = (sljit_uw)code_ptr;
 #if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
-			code_ptr = generate_put_label_code(put_label, code_ptr, (sljit_uw)(SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size));
+			code_ptr = generate_put_label_code(put_label, code_ptr, (sljit_uw)SLJIT_ADD_EXEC_OFFSET(code, executable_offset) + put_label->label->size);
 #endif
 			put_label = put_label->next;
 			break;
@@ -629,7 +629,11 @@ SLJIT_API_FUNC_ATTRIBUTE void* sljit_generate_code(struct sljit_compiler *compil
 	compiler->error = SLJIT_ERR_COMPILED;
 	compiler->executable_offset = executable_offset;
 	compiler->executable_size = code_ptr - code;
-	return (void*)(code + executable_offset);
+
+	code = (sljit_u8*)SLJIT_ADD_EXEC_OFFSET(code, executable_offset);
+
+	SLJIT_UPDATE_WX_FLAGS(code, (sljit_u8*)SLJIT_ADD_EXEC_OFFSET(code_ptr, executable_offset), 1);
+	return (void*)code;
 }
 
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
@@ -657,6 +661,9 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_has_cpu_feature(sljit_s32 feature_type)
 			get_cpu_features();
 		return cpu_has_cmov;
 
+	case SLJIT_HAS_PREFETCH:
+		return 1;
+
 	case SLJIT_HAS_SSE2:
#if (defined SLJIT_DETECT_SSE2 && SLJIT_DETECT_SSE2)
 		if (cpu_has_sse2 == -1)
@@ -702,6 +709,166 @@ static SLJIT_INLINE sljit_s32 emit_sse2_store(struct sljit_compiler *compiler,
 static SLJIT_INLINE sljit_s32 emit_sse2_load(struct sljit_compiler *compiler,
 	sljit_s32 single, sljit_s32 dst, sljit_s32 src, sljit_sw srcw);
 
+static sljit_s32 emit_cmp_binary(struct sljit_compiler *compiler,
+	sljit_s32 src1, sljit_sw src1w,
+	sljit_s32 src2, sljit_sw src2w);
+
+static SLJIT_INLINE sljit_s32 emit_endbranch(struct sljit_compiler *compiler)
+{
+#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET)
+	/* Emit endbr32/endbr64 when CET is enabled. */
+	sljit_u8 *inst;
+	inst = (sljit_u8*)ensure_buf(compiler, 1 + 4);
+	FAIL_IF(!inst);
+	INC_SIZE(4);
+	*inst++ = 0xf3;
+	*inst++ = 0x0f;
+	*inst++ = 0x1e;
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+	*inst = 0xfb;
+#else
+	*inst = 0xfa;
+#endif
+#else /* !SLJIT_CONFIG_X86_CET */
+	SLJIT_UNUSED_ARG(compiler);
+#endif /* SLJIT_CONFIG_X86_CET */
+	return SLJIT_SUCCESS;
+}
+
+#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)
+
+static SLJIT_INLINE sljit_s32 emit_rdssp(struct sljit_compiler *compiler, sljit_s32 reg)
+{
+	sljit_u8 *inst;
+	sljit_s32 size;
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	size = 5;
+#else
+	size = 4;
+#endif
+
+	inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
+	FAIL_IF(!inst);
+	INC_SIZE(size);
+	*inst++ = 0xf3;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	*inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : REX_B);
+#endif
+	*inst++ = 0x0f;
+	*inst++ = 0x1e;
+	*inst = (0x3 << 6) | (0x1 << 3) | (reg_map[reg] & 0x7);
+	return SLJIT_SUCCESS;
+}
+
+static SLJIT_INLINE sljit_s32 emit_incssp(struct sljit_compiler *compiler, sljit_s32 reg)
+{
+	sljit_u8 *inst;
+	sljit_s32 size;
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	size = 5;
+#else
+	size = 4;
+#endif
+
+	inst = (sljit_u8*)ensure_buf(compiler, 1 + size);
+	FAIL_IF(!inst);
+	INC_SIZE(size);
+	*inst++ = 0xf3;
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	*inst++ = REX_W | (reg_map[reg] <= 7 ? 0 : REX_B);
+#endif
+	*inst++ = 0x0f;
+	*inst++ = 0xae;
+	*inst = (0x3 << 6) | (0x5 << 3) | (reg_map[reg] & 0x7);
+	return SLJIT_SUCCESS;
+}
+
+#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
+
+static SLJIT_INLINE sljit_s32 cpu_has_shadow_stack(void)
+{
+#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)
+	return _get_ssp() != 0;
+#else /* !SLJIT_CONFIG_X86_CET || !__SHSTK__ */
+	return 0;
+#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
+}
+
+static SLJIT_INLINE sljit_s32 adjust_shadow_stack(struct sljit_compiler *compiler,
+	sljit_s32 src, sljit_sw srcw, sljit_s32 base, sljit_sw disp)
+{
+#if (defined SLJIT_CONFIG_X86_CET && SLJIT_CONFIG_X86_CET) && defined (__SHSTK__)
+	sljit_u8 *inst, *jz_after_cmp_inst;
+	sljit_uw size_jz_after_cmp_inst;
+
+	sljit_uw size_before_rdssp_inst = compiler->size;
+
+	/* Generate "RDSSP TMP_REG1". */
+	FAIL_IF(emit_rdssp(compiler, TMP_REG1));
+
+	/* Load return address on shadow stack into TMP_REG1. */
+#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
+	SLJIT_ASSERT(reg_map[TMP_REG1] == 5);
+
+	/* Hand code unsupported "mov 0x0(%ebp),%ebp". */
+	inst = (sljit_u8*)ensure_buf(compiler, 1 + 3);
+	FAIL_IF(!inst);
+	INC_SIZE(3);
+	*inst++ = 0x8b;
+	*inst++ = 0x6d;
+	*inst = 0;
+#else /* !SLJIT_CONFIG_X86_32 */
+	EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_MEM1(TMP_REG1), 0);
+#endif /* SLJIT_CONFIG_X86_32 */
+
+	if (src == SLJIT_UNUSED) {
+		/* Return address is on stack. */
+		src = SLJIT_MEM1(base);
+		srcw = disp;
+	}
+
+	/* Compare return address against TMP_REG1. */
+	FAIL_IF(emit_cmp_binary (compiler, TMP_REG1, 0, src, srcw));
+
+	/* Generate JZ to skip shadow stack adjustment when shadow
+	   stack matches normal stack. */
+	inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+	FAIL_IF(!inst);
+	INC_SIZE(2);
+	*inst++ = get_jump_code(SLJIT_EQUAL) - 0x10;
+	size_jz_after_cmp_inst = compiler->size;
+	jz_after_cmp_inst = inst;
+
+#if (defined SLJIT_CONFIG_X86_64 && SLJIT_CONFIG_X86_64)
+	/* REX_W is not necessary. */
+	compiler->mode32 = 1;
+#endif
+	/* Load 1 into TMP_REG1. */
+	EMIT_MOV(compiler, TMP_REG1, 0, SLJIT_IMM, 1);
+
+	/* Generate "INCSSP TMP_REG1". */
+	FAIL_IF(emit_incssp(compiler, TMP_REG1));
+
+	/* Jump back to "RDSSP TMP_REG1" to check shadow stack again. */
+	inst = (sljit_u8*)ensure_buf(compiler, 1 + 2);
+	FAIL_IF(!inst);
+	INC_SIZE(2);
+	*inst++ = JMP_i8;
+	*inst = size_before_rdssp_inst - compiler->size;
+
+	*jz_after_cmp_inst = compiler->size - size_jz_after_cmp_inst;
+#else /* !SLJIT_CONFIG_X86_CET || !__SHSTK__ */
+	SLJIT_UNUSED_ARG(compiler);
+	SLJIT_UNUSED_ARG(src);
+	SLJIT_UNUSED_ARG(srcw);
+	SLJIT_UNUSED_ARG(base);
+	SLJIT_UNUSED_ARG(disp);
+#endif /* SLJIT_CONFIG_X86_CET && __SHSTK__ */
+	return SLJIT_SUCCESS;
+}
+
 #if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 #include "sljitNativeX86_32.c"
 #else
@@ -905,6 +1072,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op0(struct sljit_compiler *compile
 		EMIT_MOV(compiler, SLJIT_R1, 0, TMP_REG1, 0);
#endif
 		break;
+	case SLJIT_ENDBR:
+		return emit_endbranch(compiler);
+	case SLJIT_SKIP_FRAMES_BEFORE_RETURN:
+		return skip_frames_before_return(compiler);
 	}
 
 	return SLJIT_SUCCESS;
@@ -1074,12 +1245,12 @@ static sljit_s32 emit_prefetch(struct sljit_compiler *compiler, sljit_s32 op,
 	*inst++ = GROUP_0F;
 	*inst++ = PREFETCH;
 
-	if (op >= SLJIT_MOV_U8 && op <= SLJIT_MOV_S8)
-		*inst |= (3 << 3);
-	else if (op >= SLJIT_MOV_U16 && op <= SLJIT_MOV_S16)
-		*inst |= (2 << 3);
-	else
+	if (op == SLJIT_PREFETCH_L1)
 		*inst |= (1 << 3);
+	else if (op == SLJIT_PREFETCH_L2)
+		*inst |= (2 << 3);
+	else if (op == SLJIT_PREFETCH_L3)
+		*inst |= (3 << 3);
 
 	return SLJIT_SUCCESS;
 }
@@ -1284,12 +1455,6 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op1(struct sljit_compiler *compile
 	compiler->mode32 = op_flags & SLJIT_I32_OP;
#endif
 
-	if (dst == SLJIT_UNUSED && !HAS_FLAGS(op)) {
-		if (op <= SLJIT_MOV_P && (src & SLJIT_MEM))
-			return emit_prefetch(compiler, op, src, srcw);
-		return SLJIT_SUCCESS;
-	}
-
 	op = GET_OPCODE(op);
 
 	if (op >= SLJIT_MOV && op <= SLJIT_MOV_P) {
@@ -2150,6 +2315,10 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
 		if (!HAS_FLAGS(op)) {
 			if ((src2 & SLJIT_IMM) && emit_lea_binary(compiler, dst, dstw, src1, src1w, SLJIT_IMM, -src2w) != SLJIT_ERR_UNSUPPORTED)
 				return compiler->error;
+			if (SLOW_IS_REG(dst) && src2 == dst) {
+				FAIL_IF(emit_non_cum_binary(compiler, BINARY_OPCODE(SUB), dst, 0, dst, 0, src1, src1w));
+				return emit_unary(compiler, NEG_rm, dst, 0, dst, 0);
+			}
 		}
 
 		if (dst == SLJIT_UNUSED)
@@ -2186,6 +2355,33 @@ SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op2(struct sljit_compiler *compile
 	return SLJIT_SUCCESS;
 }
 
+SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_emit_op_src(struct sljit_compiler *compiler, sljit_s32 op,
+	sljit_s32 src, sljit_sw srcw)
+{
+	CHECK_ERROR();
+	CHECK(check_sljit_emit_op_src(compiler, op, src, srcw));
+	ADJUST_LOCAL_OFFSET(src, srcw);
+
+	CHECK_EXTRA_REGS(src, srcw, (void)0);
+
+	switch (op) {
+	case SLJIT_FAST_RETURN:
+		return emit_fast_return(compiler, src, srcw);
+	case SLJIT_SKIP_FRAMES_BEFORE_FAST_RETURN:
+		/* Don't adjust shadow stack if it isn't enabled. */
+		if (!cpu_has_shadow_stack ())
+			return SLJIT_SUCCESS;
+		return adjust_shadow_stack(compiler, src, srcw, SLJIT_UNUSED, 0);
+	case SLJIT_PREFETCH_L1:
+	case SLJIT_PREFETCH_L2:
+	case SLJIT_PREFETCH_L3:
+	case SLJIT_PREFETCH_ONCE:
+		return emit_prefetch(compiler, op, src, srcw);
+	}
+
+	return SLJIT_SUCCESS;
+}
+
 SLJIT_API_FUNC_ATTRIBUTE sljit_s32 sljit_get_register_index(sljit_s32 reg)
 {
 	CHECK_REG_INDEX(check_sljit_get_register_index(reg));
@@ -2926,15 +3122,21 @@ SLJIT_API_FUNC_ATTRIBUTE struct sljit_put_label* sljit_emit_put_label(struct slj
 SLJIT_API_FUNC_ATTRIBUTE void sljit_set_jump_addr(sljit_uw addr, sljit_uw new_target, sljit_sw executable_offset)
 {
 	SLJIT_UNUSED_ARG(executable_offset);
+
+	SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 0);
#if (defined SLJIT_CONFIG_X86_32 && SLJIT_CONFIG_X86_32)
 	sljit_unaligned_store_sw((void*)addr, new_target - (addr + 4) - (sljit_uw)executable_offset);
#else
 	sljit_unaligned_store_sw((void*)addr, (sljit_sw) new_target);
#endif
+	SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_uw)), 1);
 }
 
 SLJIT_API_FUNC_ATTRIBUTE void sljit_set_const(sljit_uw addr, sljit_sw new_constant, sljit_sw executable_offset)
 {
 	SLJIT_UNUSED_ARG(executable_offset);
+
+	SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_sw)), 0);
 	sljit_unaligned_store_sw((void*)addr, new_constant);
+	SLJIT_UPDATE_WX_FLAGS((void*)addr, (void*)(addr + sizeof(sljit_sw)), 1);
 }
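Two of the encodings above are worth spelling out. The reworked emit_prefetch selects the prefetch flavor through the reg field of the ModRM byte that follows the 0F 18 opcode: reg = 1/2/3 gives prefetcht0/t1/t2 (the SLJIT_PREFETCH_L1/L2/L3 hints), and reg = 0, which SLJIT_PREFETCH_ONCE falls through to unmodified, gives prefetchnta. A minimal stand-alone sketch of that encoding for a [rax] operand (a hypothetical helper for illustration, not part of the sljit API):

	#include <stddef.h>
	#include <stdint.h>

	/* Encode "prefetch<hint> [rax]" as 0F 18 /hint, where hint 0..3 selects
	   prefetchnta, prefetcht0, prefetcht1, prefetcht2 respectively. */
	static size_t encode_prefetch_rax(uint8_t *out, unsigned hint)
	{
		out[0] = 0x0f;                         /* two-byte opcode escape */
		out[1] = 0x18;                         /* prefetch opcode group */
		out[2] = (uint8_t)((hint & 0x3) << 3); /* mod=00, reg=hint, rm=000 ([rax]) */
		return 3;
	}

Likewise, the SLJIT_UPDATE_WX_FLAGS(from, to, enable_exec) calls now bracketing sljit_set_jump_addr and sljit_set_const exist for W^X executable allocators: the region is flipped to writable before the target word is patched and back to executable afterwards, and the macro expands to nothing when no W^X allocator is in use. A rough mprotect-based illustration of the bracketing idea — page size and error handling simplified, and not how sljit's own allocator is implemented:

	#include <stdint.h>
	#include <sys/mman.h>

	/* Make the pages covering [from, to) either RW (patchable) or RX
	   (runnable), but never writable and executable at the same time. */
	static void update_wx_flags(void *from, void *to, int enable_exec)
	{
		const uintptr_t page = 4096; /* assumed page size; real code queries it */
		uintptr_t start = (uintptr_t)from & ~(page - 1);
		uintptr_t end = ((uintptr_t)to + page - 1) & ~(page - 1);
		(void)mprotect((void *)start, (size_t)(end - start),
			enable_exec ? (PROT_READ | PROT_EXEC) : (PROT_READ | PROT_WRITE));
	}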