diff --git a/ext/opcache/jit/ir/ir_builder.h b/ext/opcache/jit/ir/ir_builder.h
index ba1924f8d65e5..358ae241e2dd4 100644
--- a/ext/opcache/jit/ir/ir_builder.h
+++ b/ext/opcache/jit/ir/ir_builder.h
@@ -490,20 +490,20 @@ extern "C" {
 #define ir_ADD_OFFSET(_addr, _offset) _ir_ADD_OFFSET(_ir_CTX, (_addr), (_offset))
 
 /* Unfoldable variant of COPY */
-#define ir_HARD_COPY(_type, _op1) ir_BINARY_OP(IR_COPY, (_type), (_op1), 1)
-#define ir_HARD_COPY_B(_op1) ir_BINARY_OP_B(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_U8(_op1) ir_BINARY_OP_U8(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_U16(_op1) ir_BINARY_OP_U16(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_U32(_op1) ir_BINARY_OP_U32(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_U64(_op1) ir_BINARY_OP_U64(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_A(_op1) ir_BINARY_OP_A(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_C(_op1) ir_BINARY_OP_C(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_I8(_op1) ir_BINARY_OP_I8(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_I16(_op1) ir_BINARY_OP_I16(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_I32(_op1) ir_BINARY_OP_I32(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_I64(_op1) ir_BINARY_OP_I64(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_D(_op1) ir_BINARY_OP_D(IR_COPY, (_op1), 1)
-#define ir_HARD_COPY_F(_op1) ir_BINARY_OP_F(IR_COPY, (_op1), 1)
+#define ir_HARD_COPY(_type, _op1) ir_emit2(_ir_CTX, IR_OPT(IR_COPY, (_type)), (_op1), 1)
+#define ir_HARD_COPY_B(_op1) ir_HARD_COPY(IR_BOOL, _op1)
+#define ir_HARD_COPY_U8(_op1) ir_HARD_COPY(IR_U8, _op1)
+#define ir_HARD_COPY_U16(_op1) ir_HARD_COPY(IR_U16, _op1)
+#define ir_HARD_COPY_U32(_op1) ir_HARD_COPY(IR_U32, _op1)
+#define ir_HARD_COPY_U64(_op1) ir_HARD_COPY(IR_U64, _op1)
+#define ir_HARD_COPY_A(_op1) ir_HARD_COPY(IR_ADDR, _op1)
+#define ir_HARD_COPY_C(_op1) ir_HARD_COPY(IR_CHAR, _op1)
+#define ir_HARD_COPY_I8(_op1) ir_HARD_COPY(IR_I8, _op1)
+#define ir_HARD_COPY_I16(_op1) ir_HARD_COPY(IR_I16, _op1)
+#define ir_HARD_COPY_I32(_op1) ir_HARD_COPY(IR_I32, _op1)
+#define ir_HARD_COPY_I64(_op1) ir_HARD_COPY(IR_I64, _op1)
+#define ir_HARD_COPY_D(_op1) ir_HARD_COPY(IR_DOUBLE, _op1)
+#define ir_HARD_COPY_F(_op1) ir_HARD_COPY(IR_FLOAT, _op1)
 
 #define ir_PARAM(_type, _name, _num) _ir_PARAM(_ir_CTX, (_type), (_name), (_num))
 #define ir_VAR(_type, _name) _ir_VAR(_ir_CTX, (_type), (_name))
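
Note on the ir_builder.h hunk above: ir_HARD_COPY now calls ir_emit2() directly instead of going through ir_BINARY_OP() (which emits via the fold-on-construction path), and the typed ir_HARD_COPY_* variants simply delegate to it with the concrete IR type. A hard copy must survive as a real node, so emitting it past the folding engine makes the intent explicit. Below is a toy, self-contained model of that distinction; emit_folded_copy() and emit_raw_copy() are invented names, not the IR API.

    #include <stdio.h>

    /*
     * Toy model only: a folding emitter may hand back an existing node instead
     * of creating a new one, while a raw emitter always appends a node.  The
     * real builder routes ir_BINARY_OP() through the fold path and ir_emit2()
     * past it; emit_folded_copy()/emit_raw_copy() are invented stand-ins.
     */
    static int next_node = 100;                 /* pretend nodes 1..99 already exist */

    static int emit_folded_copy(int src) { return src; }                      /* COPY may fold to its source */
    static int emit_raw_copy(int src)    { (void)src; return next_node++; }   /* always materializes a node */

    int main(void)
    {
        int x = 42;                             /* an existing node reference */
        printf("foldable copy of node %d -> node %d\n", x, emit_folded_copy(x));
        printf("hard copy of node %d     -> node %d\n", x, emit_raw_copy(x));
        return 0;
    }
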
diff --git a/ext/opcache/jit/ir/ir_fold.h b/ext/opcache/jit/ir/ir_fold.h
index 90112214d0c8b..2f5be6ca2e00b 100644
--- a/ext/opcache/jit/ir/ir_fold.h
+++ b/ext/opcache/jit/ir/ir_fold.h
@@ -924,7 +924,7 @@ IR_FOLD(SHL(C_CHAR, C_CHAR))
 IR_FOLD(SHL(C_I8, C_I8))
 {
     IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-    IR_FOLD_CONST_I(op1_insn->val.i8 << op2_insn->val.i8);
+    IR_FOLD_CONST_I((int8_t)(op1_insn->val.u8 << op2_insn->val.u8));
 }
 
 IR_FOLD(SHL(C_U16, C_U16))
@@ -936,7 +936,7 @@ IR_FOLD(SHL(C_U16, C_U16))
 IR_FOLD(SHL(C_I16, C_I16))
 {
     IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-    IR_FOLD_CONST_I(op1_insn->val.i16 << op2_insn->val.i16);
+    IR_FOLD_CONST_I((int16_t)(op1_insn->val.u16 << op2_insn->val.u16));
 }
 
 IR_FOLD(SHL(C_U32, C_U32))
@@ -948,7 +948,7 @@ IR_FOLD(SHL(C_U32, C_U32))
 IR_FOLD(SHL(C_I32, C_I32))
 {
     IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-    IR_FOLD_CONST_I(op1_insn->val.i32 << op2_insn->val.i32);
+    IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 << op2_insn->val.u32));
 }
 
 IR_FOLD(SHL(C_U64, C_U64))
@@ -980,7 +980,7 @@ IR_FOLD(SHR(C_U16, C_U16))
 IR_FOLD(SHR(C_I16, C_I16))
 {
     IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-    IR_FOLD_CONST_U((int16_t)(op1_insn->val.u16 >> op2_insn->val.u16));
+    IR_FOLD_CONST_I((int16_t)(op1_insn->val.u16 >> op2_insn->val.u16));
 }
 
 IR_FOLD(SHR(C_U32, C_U32))
@@ -992,7 +992,7 @@ IR_FOLD(SHR(C_U32, C_U32))
 IR_FOLD(SHR(C_I32, C_I32))
 {
     IR_ASSERT(IR_OPT_TYPE(opt) == op1_insn->type);
-    IR_FOLD_CONST_U((int32_t)(op1_insn->val.u32 >> op2_insn->val.u32));
+    IR_FOLD_CONST_I((int32_t)(op1_insn->val.u32 >> op2_insn->val.u32));
 }
 
 IR_FOLD(SHR(C_U64, C_U64))
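
Note on the SHL/SHR fold hunks above: the signed SHL folds now shift the unsigned members of the value union and cast the result back to the signed width, and the signed SHR folds record their already-unsigned computation through IR_FOLD_CONST_I so it lands in the signed constant path. Left-shifting a negative signed value is undefined behaviour in C, while unsigned shifts are fully defined, so computing in unsigned and converting back produces the intended two's-complement bit pattern on the targets PHP supports. A minimal standalone illustration of the pattern (plain C, nothing below is taken from the IR sources):

    #include <stdint.h>
    #include <stdio.h>

    /* Fold SHL on signed 8-bit constants: shift as unsigned (well defined for
     * any operand), then convert back to the signed type to get the
     * two's-complement wraparound the IR semantics expect.  Shifting the
     * negative int8_t directly would be undefined behaviour. */
    static int8_t fold_shl_i8(int8_t a, int8_t b)
    {
        return (int8_t)((uint8_t)a << (uint8_t)b);
    }

    int main(void)
    {
        printf("%d\n", fold_shl_i8(-1, 4));   /* 0xFF << 4 -> 0xF0 -> prints -16 */
        return 0;
    }
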
@@ -2728,7 +2728,7 @@ IR_FOLD(ADD(ADD, C_ADDR))
         /* (x + c1) + c2 => x + (c1 + c2) */
         val.u64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2743,7 +2743,7 @@ IR_FOLD(ADD(ADD, C_I64))
         /* (x + c1) + c2 => x + (c1 + c2) */
         val.i64 = ctx->ir_base[op1_insn->op2].val.u64 + op2_insn->val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2759,14 +2759,14 @@ IR_FOLD(ADD(SUB, C_ADDR))
         /* (x - c1) + c2 => x + (c2 - c1) */
         val.u64 = op2_insn->val.u64 - ctx->ir_base[op1_insn->op2].val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     } else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
         /* (c1 - x) + c2 => (c1 + c2) - x */
         val.u64 = ctx->ir_base[op1_insn->op1].val.u64 + op2_insn->val.u64;
         opt++; /* ADD -> SUB */
         op2 = op1_insn->op2;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2785,14 +2785,14 @@ IR_FOLD(ADD(SUB, C_I64))
             opt++; /* ADD -> SUB */
         }
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     } else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
         /* (c1 - x) + c2 => (c1 + c2) - x */
         val.i64 = ctx->ir_base[op1_insn->op1].val.u64 + op2_insn->val.u64;
         opt++; /* ADD -> SUB */
         op2 = op1_insn->op2;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2809,7 +2809,7 @@ IR_FOLD(SUB(ADD, C_ADDR))
         val.u64 = ctx->ir_base[op1_insn->op2].val.u64 - op2_insn->val.u64;
         opt--; /* SUB -> ADD */
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2829,7 +2829,7 @@ IR_FOLD(SUB(ADD, C_I64))
             opt--; /* SUB -> ADD */
         }
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2845,7 +2845,7 @@ IR_FOLD(SUB(C_ADDR, ADD))
         /* c1 - (x + c2) => (c1 - c2) - x */
         val.u64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op2].val.u64;
         op2 = op2_insn->op1;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2860,7 +2860,7 @@ IR_FOLD(SUB(C_I64, ADD))
         /* c1 - (x + c2) => (c1 - c2) - x */
         val.i64 = op1_insn->val.u64 - ctx->ir_base[op2_insn->op2].val.u64;
         op2 = op2_insn->op1;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2880,13 +2880,13 @@ IR_FOLD(SUB(SUB, C_ADDR))
             opt--; /* SUB -> ADD */
         }
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     } else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
         /* (c1 - x) - c2 => (c1 - c2) - x */
         val.u64 = ctx->ir_base[op1_insn->op1].val.u64 - op2_insn->val.u64;
         op2 = op1_insn->op2;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2905,13 +2905,13 @@ IR_FOLD(SUB(SUB, C_I64))
             opt--; /* SUB -> ADD */
         }
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     } else if (IR_IS_CONST_REF(op1_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op1_insn->op1].op)) {
         /* (c1 - x) - c2 => (c1 - c2) - x */
         val.i64 = ctx->ir_base[op1_insn->op1].val.u64 - op2_insn->val.u64;
         op2 = op1_insn->op2;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2927,7 +2927,7 @@ IR_FOLD(SUB(C_ADDR, SUB))
         /* c1 - (x - c2) => (c1 + c2) - x */
         val.u64 = op1_insn->val.u64 + ctx->ir_base[op2_insn->op2].val.u64;
         op2 = op2_insn->op1;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op1_insn->type);
         IR_FOLD_RESTART;
     } else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
         /* c1 - (c2 - x) => x + (c1 - c2) */
@@ -2937,7 +2937,7 @@ IR_FOLD(SUB(C_ADDR, SUB))
             opt++; /* ADD -> SUB */
         }
         op1 = op2_insn->op2;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op1_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2952,7 +2952,7 @@ IR_FOLD(SUB(C_I64, SUB))
         /* c1 - (x - c2) => (c1 + c2) - x */
         val.i64 = op1_insn->val.u64 + ctx->ir_base[op2_insn->op2].val.u64;
         op2 = op2_insn->op1;
-        op1 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op1 = ir_const(ctx, val, op1_insn->type);
         IR_FOLD_RESTART;
     } else if (IR_IS_CONST_REF(op2_insn->op1) && !IR_IS_SYM_CONST(ctx->ir_base[op2_insn->op1].op)) {
         /* c1 - (c2 - x) => x + (c1 - c2) */
@@ -2962,7 +2962,7 @@ IR_FOLD(SUB(C_I64, SUB))
             opt++; /* ADD -> SUB */
         }
         op1 = op2_insn->op2;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op1_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2978,7 +2978,7 @@ IR_FOLD(MUL(MUL, C_ADDR))
         /* (x * c1) * c2 => x * (c1 * c2) */
         val.u64 = ctx->ir_base[op1_insn->op2].val.u64 * op2_insn->val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -2993,7 +2993,7 @@ IR_FOLD(MUL(MUL, C_I64))
         /* (x * c1) * c2 => x * (c1 * c2) */
        val.i64 = ctx->ir_base[op1_insn->op2].val.u64 * op2_insn->val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -3013,7 +3013,7 @@ IR_FOLD(AND(AND, C_ADDR))
         /* (x & c1) & c2 => x & (c1 & c2) */
         val.u64 = ctx->ir_base[op1_insn->op2].val.u64 & op2_insn->val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -3033,7 +3033,7 @@ IR_FOLD(OR(OR, C_ADDR))
         /* (x | c1) | c2 => x | (c1 | c2) */
         val.u64 = ctx->ir_base[op1_insn->op2].val.u64 | op2_insn->val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
@@ -3053,7 +3053,7 @@ IR_FOLD(XOR(XOR, C_ADDR))
         /* (x ^ c1) ^ c2 => x ^ (c1 ^ c2) */
         val.u64 = ctx->ir_base[op1_insn->op2].val.u64 ^ op2_insn->val.u64;
         op1 = op1_insn->op1;
-        op2 = ir_const(ctx, val, IR_OPT_TYPE(opt));
+        op2 = ir_const(ctx, val, op2_insn->type);
         IR_FOLD_RESTART;
     }
     IR_FOLD_NEXT;
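
Note on the reassociation hunks above: in rules such as (x + c1) + c2 => x + (c1 + c2), the merged constant is now created with the type of the constant operand involved in the rewrite (op2_insn->type, or op1_insn->type in the c1 - (...) forms) rather than the result type decoded from opt, presumably so its declared type matches the constant it replaces. The value itself is still computed on the u64 member, where wraparound is modular and well defined, which is what makes merging the two constants ahead of time legal. A self-contained sketch of that arithmetic identity (plain C, unrelated to the IR API):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* (x + c1) + c2 and x + (c1 + c2) agree in 64-bit unsigned (modular)
     * arithmetic even when an intermediate sum wraps, so a constant-folding
     * pass may merge c1 and c2 before the variable part is known. */
    int main(void)
    {
        uint64_t x  = UINT64_C(0xFFFFFFFFFFFFFF00);   /* near the top of the range */
        uint64_t c1 = UINT64_C(0x200);
        uint64_t c2 = UINT64_C(0x300);

        printf("(x + c1) + c2 = 0x%016" PRIx64 "\n", (x + c1) + c2);
        printf("x + (c1 + c2) = 0x%016" PRIx64 "\n", x + (c1 + c2));  /* same value */
        return 0;
    }
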
diff --git a/ext/opcache/jit/ir/ir_gcm.c b/ext/opcache/jit/ir/ir_gcm.c
index 4d518d20079fb..3ea78850249ab 100644
--- a/ext/opcache/jit/ir/ir_gcm.c
+++ b/ext/opcache/jit/ir/ir_gcm.c
@@ -1307,14 +1307,17 @@ int ir_schedule(ir_ctx *ctx)
                     new_insn->proto = ir_strl(&new_ctx, proto, len);
                 }
             } else if (new_insn->op == IR_FUNC) {
-                new_insn->val.u64 = ir_str(&new_ctx, ir_get_str(ctx, new_insn->val.name));
+                size_t len;
+                const char *name = ir_get_strl(ctx, new_insn->val.name, &len);
+                new_insn->val.u64 = ir_strl(&new_ctx, name, len);
                 if (new_insn->proto) {
-                    size_t len;
                     const char *proto = ir_get_strl(ctx, new_insn->proto, &len);
                     new_insn->proto = ir_strl(&new_ctx, proto, len);
                 }
             } else if (new_insn->op == IR_SYM || new_insn->op == IR_STR) {
-                new_insn->val.u64 = ir_str(&new_ctx, ir_get_str(ctx, new_insn->val.name));
+                size_t len;
+                const char *str = ir_get_strl(ctx, new_insn->val.name, &len);
+                new_insn->val.u64 = ir_strl(&new_ctx, str, len);
             }
             new_insn++;
             ref++;
@@ -1339,16 +1342,19 @@ int ir_schedule(ir_ctx *ctx)
                 new_insn->proto = 0;
             }
         } else if (insn->op == IR_FUNC) {
-            new_insn->val.u64 = ir_str(&new_ctx, ir_get_str(ctx, insn->val.name));
+            size_t len;
+            const char *name = ir_get_strl(ctx, insn->val.name, &len);
+            new_insn->val.u64 = ir_strl(&new_ctx, name, len);
             if (insn->proto) {
-                size_t len;
                 const char *proto = ir_get_strl(ctx, insn->proto, &len);
                 new_insn->proto = ir_strl(&new_ctx, proto, len);
             } else {
                 new_insn->proto = 0;
             }
         } else if (insn->op == IR_SYM || insn->op == IR_STR) {
-            new_insn->val.u64 = ir_str(&new_ctx, ir_get_str(ctx, insn->val.name));
+            size_t len;
+            const char *str = ir_get_strl(ctx, insn->val.name, &len);
+            new_insn->val.u64 = ir_strl(&new_ctx, str, len);
         } else {
             new_insn->val.u64 = insn->val.u64;
         }
@@ -1413,12 +1419,10 @@ int ir_schedule(ir_ctx *ctx)
                 break;
             case 1:
                 new_insn->op1 = _xlat[insn->op1];
-                if (new_insn->op == IR_PARAM || insn->op == IR_VAR) {
-                    new_insn->op2 = ir_str(&new_ctx, ir_get_str(ctx, insn->op2));
-                } else if (new_insn->op == IR_PROTO) {
+                if (new_insn->op == IR_PARAM || new_insn->op == IR_VAR || new_insn->op == IR_PROTO) {
                     size_t len;
-                    const char *proto = ir_get_strl(ctx, insn->op2, &len);
-                    new_insn->op2 = ir_strl(&new_ctx, proto, len);
+                    const char *str = ir_get_strl(ctx, insn->op2, &len);
+                    new_insn->op2 = ir_strl(&new_ctx, str, len);
                 } else {
                     new_insn->op2 = insn->op2;
                 }
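
Note on the ir_gcm.c hunks above: ir_schedule() now copies IR_FUNC/IR_SYM/IR_STR names and IR_PARAM/IR_VAR/IR_PROTO operands into the new context with the length-aware ir_get_strl()/ir_strl() pair that proto strings already used, instead of ir_get_str()/ir_str(). The recorded length is carried over directly rather than recomputed with strlen(), so (presumably) a string whose stored length differs from its first NUL terminator is preserved byte for byte. A standalone contrast of the two copying styles; copy_str() and copy_strl() are illustrative names, not the IR API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Terminator-based copy: everything after an embedded '\0' is dropped. */
    static char *copy_str(const char *s)
    {
        size_t len = strlen(s);
        char *p = malloc(len + 1);
        memcpy(p, s, len + 1);
        return p;
    }

    /* Length-based copy: the payload is preserved byte for byte. */
    static char *copy_strl(const char *s, size_t len)
    {
        char *p = malloc(len + 1);
        memcpy(p, s, len);
        p[len] = '\0';
        return p;
    }

    int main(void)
    {
        const char data[] = "abc\0def";            /* 7 payload bytes, embedded NUL */
        char *a = copy_str(data);
        char *b = copy_strl(data, sizeof(data) - 1);

        printf("terminator copy kept %zu bytes\n", strlen(a));              /* 3 */
        printf("length copy kept all 7 bytes: %s\n",
               memcmp(b, data, sizeof(data) - 1) == 0 ? "yes" : "no");      /* yes */
        free(a);
        free(b);
        return 0;
    }
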
diff --git a/ext/opcache/jit/ir/ir_sccp.c b/ext/opcache/jit/ir/ir_sccp.c
index ae0eebadee54a..58de0d726f781 100644
--- a/ext/opcache/jit/ir/ir_sccp.c
+++ b/ext/opcache/jit/ir/ir_sccp.c
@@ -2995,6 +2995,7 @@ static bool ir_try_split_if(ir_ctx *ctx, ir_ref ref, ir_insn *insn, ir_bitqueue
 
         end2->optx = IR_OPTX(IR_IF, IR_VOID, 2);
         end2->op2 = cond->op3;
+        ir_bitqueue_add(worklist, end2_ref);
 
         merge->optx = IR_OPTX(op, IR_VOID, 1);
         merge->op1 = end2_ref;
@@ -3190,6 +3191,7 @@ static bool ir_try_split_if_cmp(ir_ctx *ctx, ir_ref ref, ir_insn *insn, ir_bitqu
 
         end2->optx = IR_OPTX(IR_IF, IR_VOID, 2);
         end2->op2 = insn->op2;
+        ir_bitqueue_add(worklist, end2_ref);
 
         merge->optx = IR_OPTX(op, IR_VOID, 1);
         merge->op1 = end2_ref;
diff --git a/ext/opcache/jit/zend_jit_trace.c b/ext/opcache/jit/zend_jit_trace.c
index d0f8fabe6e9c1..66bb380c23d8d 100644
--- a/ext/opcache/jit/zend_jit_trace.c
+++ b/ext/opcache/jit/zend_jit_trace.c
@@ -4095,6 +4095,7 @@ static zend_vm_opcode_handler_t zend_jit_trace(zend_jit_trace_rec *trace_buffer,
     zend_jit_trace_rec *p;
     zend_jit_op_array_trace_extension *jit_extension;
     int num_op_arrays = 0;
+    bool do_bailout = 0;
     zend_jit_trace_info *t;
     const zend_op_array *op_arrays[ZEND_JIT_TRACE_MAX_FUNCS];
     uint8_t smart_branch_opcode;
@@ -4125,6 +4126,8 @@ static zend_vm_opcode_handler_t zend_jit_trace(zend_jit_trace_rec *trace_buffer,
 
     checkpoint = zend_arena_checkpoint(CG(arena));
 
+    zend_try {
+
     ssa = zend_jit_trace_build_tssa(trace_buffer, parent_trace, exit_num, script, op_arrays, &num_op_arrays);
 
     if (!ssa) {
@@ -7358,6 +7361,10 @@ static zend_vm_opcode_handler_t zend_jit_trace(zend_jit_trace_rec *trace_buffer,
             zend_string_release(name);
         }
 
+    } zend_catch {
+        do_bailout = 1;
+    } zend_end_try();
+
 jit_cleanup:
     /* Clean up used op_arrays */
     while (num_op_arrays > 0) {
@@ -7378,6 +7385,10 @@ static zend_vm_opcode_handler_t zend_jit_trace(zend_jit_trace_rec *trace_buffer,
     JIT_G(current_frame) = NULL;
     JIT_G(current_trace) = NULL;
 
+    if (do_bailout) {
+        zend_bailout();
+    }
+
     return handler;
 }
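
Note on the zend_jit_trace.c hunks above: trace compilation is now wrapped in zend_try/zend_catch. A bailout raised while compiling no longer jumps straight out of zend_jit_trace(); instead do_bailout is set, the jit_cleanup block still runs (op_array cleanup, JIT_G(current_frame)/JIT_G(current_trace) resets), and zend_bailout() re-raises afterwards so the outer handler still sees it. zend_try/zend_catch are built on setjmp/longjmp, so the catch, clean up, re-raise shape can be sketched standalone; jit_compile() and cleanup() below are placeholders, not PHP symbols:

    #include <setjmp.h>
    #include <stdbool.h>
    #include <stdio.h>

    static jmp_buf *current_bailout;

    /* Stand-in for zend_bailout(): unwind to the innermost "try". */
    static void bailout(void)
    {
        longjmp(*current_bailout, 1);
    }

    static void jit_compile(void)        /* placeholder for the work inside zend_try */
    {
        fprintf(stderr, "fatal error during compilation\n");
        bailout();
    }

    static void cleanup(void)            /* placeholder for the jit_cleanup block */
    {
        puts("cleanup ran");
    }

    int main(void)
    {
        jmp_buf bailout_buf, *orig = current_bailout;
        bool do_bailout = false;

        current_bailout = &bailout_buf;
        if (setjmp(bailout_buf) == 0) {  /* zend_try */
            jit_compile();
        } else {                         /* zend_catch */
            do_bailout = true;
        }
        current_bailout = orig;          /* zend_end_try() restores the outer handler */

        cleanup();                       /* always runs, even after a bailout */

        if (do_bailout) {
            puts("re-raising bailout for the outer handler");
            /* in the real code: zend_bailout(); */
        }
        return 0;
    }
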