 #include "zend_ssa.h"
 #include "zend_dump.h"
 #include "zend_inference.h"
+#include "zend_worklist.h"
 #include "Optimizer/zend_optimizer_internal.h"
 
 static bool dominates(const zend_basic_block *blocks, int a, int b) {
@@ -816,23 +817,14 @@ ZEND_API int zend_ssa_rename_op(const zend_op_array *op_array, const zend_op *op
 }
 /* }}} */
 
-static zend_result zend_ssa_rename(const zend_op_array *op_array, uint32_t build_flags, zend_ssa *ssa, int *var, int n) /* {{{ */
+static void zend_ssa_rename_in_block(const zend_op_array *op_array, uint32_t build_flags, zend_ssa *ssa, int *var, int n) /* {{{ */
 {
 	zend_basic_block *blocks = ssa->cfg.blocks;
 	zend_ssa_block *ssa_blocks = ssa->blocks;
 	zend_ssa_op *ssa_ops = ssa->ops;
 	int ssa_vars_count = ssa->vars_count;
 	int i, j;
 	zend_op *opline, *end;
-	int *tmp = NULL;
-	ALLOCA_FLAG(use_heap = 0);
-
-	// FIXME: Can we optimize this copying out in some cases?
-	if (blocks[n].next_child >= 0) {
-		tmp = do_alloca(sizeof(int) * (op_array->last_var + op_array->T), use_heap);
-		memcpy(tmp, var, sizeof(int) * (op_array->last_var + op_array->T));
-		var = tmp;
-	}
 
 	if (ssa_blocks[n].phis) {
 		zend_ssa_phi *phi = ssa_blocks[n].phis;
@@ -916,22 +908,90 @@ static zend_result zend_ssa_rename(const zend_op_array *op_array, uint32_t build
 	}
 
 	ssa->vars_count = ssa_vars_count;
+}
+/* }}} */
 
-	j = blocks[n].children;
-	while (j >= 0) {
-		// FIXME: Tail call optimization?
-		if (zend_ssa_rename(op_array, build_flags, ssa, var, j) == FAILURE)
-			return FAILURE;
-		j = blocks[j].next_child;
+static zend_result zend_ssa_rename(const zend_op_array *op_array, uint32_t build_flags, zend_ssa *ssa, int *var, int n)
+{
+	/* The worklist contains block numbers, encoded as positive or negative values.
+	 * Positive values indicate that the variable rename still needs to happen for the block.
+	 * Negative values indicate that the variable rename was done and all children were handled too.
+	 * In that case, we will clean up.
+	 * Because block 0 is valid, we bias the block numbers by adding 1 so that we can distinguish
+	 * positive and negative values in all cases. */
+	zend_worklist_stack work;
+	ALLOCA_FLAG(work_use_heap);
+	ZEND_WORKLIST_STACK_ALLOCA(&work, ssa->cfg.blocks_count, work_use_heap);
+	zend_worklist_stack_push(&work, n + 1);
+
+	/* This is used to backtrack to the right version of the renamed variables. */
+	ALLOCA_FLAG(save_vars_use_heap);
+	unsigned int save_vars_top = 0;
+	int **save_vars = do_alloca(sizeof(int *) * (ssa->cfg.blocks_count + 1), save_vars_use_heap);
+	save_vars[0] = var;
+
+	while (work.len) {
+		n = zend_worklist_stack_pop(&work);
+
+		/* Enter state: perform SSA variable rename */
+		if (n > 0) {
+			n--;
+
+			// FIXME: Can we optimize this copying out in some cases?
+			int *new_var;
+			if (ssa->cfg.blocks[n].next_child >= 0) {
+				new_var = emalloc(sizeof(int) * (op_array->last_var + op_array->T));
+				memcpy(new_var, save_vars[save_vars_top], sizeof(int) * (op_array->last_var + op_array->T));
+				save_vars[++save_vars_top] = new_var;
+			} else {
+				new_var = save_vars[save_vars_top];
+			}
+
+			zend_ssa_rename_in_block(op_array, build_flags, ssa, new_var, n);
+
+			int j = ssa->cfg.blocks[n].children;
+			if (j >= 0) {
+				/* Push backtrack state */
+				zend_worklist_stack_push(&work, -(n + 1));
+
+				/* Push children in enter state */
+				unsigned int child_count = 0;
+				int len_prior = work.len;
+				do {
+					zend_worklist_stack_push(&work, j + 1);
+					j = ssa->cfg.blocks[j].next_child;
+					child_count++;
+				} while (j >= 0);
+
+				/* Reverse the block order to maintain the SSA variable number order of previous PHP versions,
+				 * as the data structure doesn't allow reverse dominator tree traversal. */
+				for (unsigned int i = 0; i < child_count / 2; i++) {
+					int tmp = work.buf[len_prior + i];
+					work.buf[len_prior + i] = work.buf[work.len - 1 - i];
+					work.buf[work.len - 1 - i] = tmp;
+				}
+			} else {
+				/* Leaves jump directly to backtracking */
+				goto backtrack;
+			}
+		}
+		/* Leave state: backtrack */
+		else {
+			n = -n;
+			n--;
+backtrack:;
+			if (ssa->cfg.blocks[n].next_child >= 0) {
+				efree(save_vars[save_vars_top]);
+				save_vars_top--;
+			}
+		}
 	}
 
-	if (tmp) {
-		free_alloca(tmp, use_heap);
-	}
+	free_alloca(save_vars, save_vars_use_heap);
+	ZEND_WORKLIST_STACK_FREE_ALLOCA(&work, work_use_heap);
 
 	return SUCCESS;
 }
-/* }}} */
 
 ZEND_API zend_result zend_build_ssa(zend_arena **arena, const zend_script *script, const zend_op_array *op_array, uint32_t build_flags, zend_ssa *ssa) /* {{{ */
 {
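
For readers following the control flow above, here is a minimal standalone sketch (not part of the patch) of the enter/leave encoding the new zend_ssa_rename() uses: a node index k is pushed as k + 1 to mark the enter phase and as -(k + 1) to mark the leave phase, so index 0 stays distinguishable. The node struct, the first_child/next_sibling fields, and walk() are illustrative stand-ins for the dominator tree's children/next_child links, not php-src APIs, and the sketch omits the child-order reversal the patch performs to keep SSA variable numbering stable.

```c
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the dominator tree links (children / next_child in zend_ssa). */
typedef struct {
	int first_child;   /* index of first child, -1 if none */
	int next_sibling;  /* index of next sibling, -1 if none */
} node;

static void walk(const node *nodes, int nodes_count, int root)
{
	/* Each node is pushed at most twice (enter + leave), so 2 * nodes_count suffices. */
	int *stack = malloc(sizeof(int) * nodes_count * 2);
	int top = 0;

	stack[top++] = root + 1;                      /* enter the root */

	while (top > 0) {
		int n = stack[--top];

		if (n > 0) {                              /* enter state: visit, then schedule children */
			n--;
			printf("enter %d\n", n);              /* the rename would happen here */

			if (nodes[n].first_child >= 0) {
				stack[top++] = -(n + 1);          /* leave marker, popped after all children */
				for (int c = nodes[n].first_child; c >= 0; c = nodes[c].next_sibling) {
					stack[top++] = c + 1;         /* enter each child */
				}
			} else {
				printf("leave %d\n", n);          /* leaf: nothing to wait for */
			}
		} else {                                  /* leave state: all children done */
			n = -n - 1;
			printf("leave %d\n", n);              /* the saved var array would be restored here */
		}
	}

	free(stack);
}

int main(void)
{
	/* A small tree: 0 -> {1, 2}, 2 -> {3} */
	node nodes[4] = {
		{ 1, -1 },
		{ -1, 2 },
		{ 3, -1 },
		{ -1, -1 },
	};
	walk(nodes, 4, 0);
	return 0;
}
```

Running it on the small tree in main() prints matching enter/leave pairs in the same pre/post order in which the patch renames a block's variables and later frees the saved copy of the var array once all dominated blocks have been processed.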