@@ -745,6 +745,112 @@ enum {
745
745
OSWB_MAX_DEPTH = 5 , // up to 5 different classes
746
746
};
747
747
748
+ // Codegen for setting an instance variable.
749
+ // Preconditions:
750
+ // - receiver is in REG0
751
+ // - receiver has the same class as CLASS_OF(comptime_receiver)
752
+ // - no stack push or pops to ctx since the entry to the codegen of the instruction being compiled
753
+ static codegen_status_t
754
+ gen_set_ivar (jitstate_t * jit , ctx_t * ctx , const int max_chain_depth , VALUE comptime_receiver , ID ivar_name , insn_opnd_t reg0_opnd , uint8_t * side_exit )
755
+ {
756
+ VALUE comptime_val_klass = CLASS_OF (comptime_receiver );
757
+ const ctx_t starting_context = * ctx ; // make a copy for use with jit_chain_guard
758
+
759
+ // If the class uses the default allocator, instances should all be T_OBJECT
760
+ // NOTE: This assumes nobody changes the allocator of the class after allocation.
761
+ // Eventually, we can encode whether an object is T_OBJECT or not
762
+ // inside object shapes.
763
+ if (rb_get_alloc_func (comptime_val_klass ) != rb_class_allocate_instance ) {
764
+ GEN_COUNTER_INC (cb , setivar_not_object );
765
+ return YJIT_CANT_COMPILE ;
766
+ }
767
+ RUBY_ASSERT (BUILTIN_TYPE (comptime_receiver ) == T_OBJECT ); // because we checked the allocator
768
+
769
+ // ID for the name of the ivar
770
+ ID id = ivar_name ;
771
+ struct rb_iv_index_tbl_entry * ent ;
772
+ struct st_table * iv_index_tbl = ROBJECT_IV_INDEX_TBL (comptime_receiver );
773
+
774
+ // Bail if this is a heap object, because this needs a write barrier
775
+ ADD_COMMENT (cb , "guard value is immediate" );
776
+ test (cb , REG1 , imm_opnd (RUBY_IMMEDIATE_MASK ));
777
+ jz_ptr (cb , COUNTED_EXIT (side_exit , setivar_val_heapobject ));
778
+
779
+ // Lookup index for the ivar the instruction loads
780
+ if (iv_index_tbl && rb_iv_index_tbl_lookup (iv_index_tbl , id , & ent )) {
781
+ uint32_t ivar_index = ent -> index ;
782
+
783
+ x86opnd_t val_to_write = ctx_stack_pop (ctx , 1 );
784
+ mov (cb , REG1 , val_to_write );
785
+
786
+ x86opnd_t flags_opnd = member_opnd (REG0 , struct RBasic , flags );
787
+
788
+ // Bail if this object is frozen
789
+ ADD_COMMENT (cb , "guard self is not frozen" );
790
+ test (cb , flags_opnd , imm_opnd (RUBY_FL_FREEZE ));
791
+ jz_ptr (cb , COUNTED_EXIT (side_exit , setivar_frozen ));
792
+
793
+ // Pop receiver if it's on the temp stack
794
+ if (!reg0_opnd .is_self ) {
795
+ (void )ctx_stack_pop (ctx , 1 );
796
+ }
797
+
798
+ // Compile time self is embedded and the ivar index lands within the object
799
+ if (RB_FL_TEST_RAW (comptime_receiver , ROBJECT_EMBED ) && ivar_index < ROBJECT_EMBED_LEN_MAX ) {
800
+ // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
801
+
802
+ // Guard that self is embedded
803
+ // TODO: BT and JC is shorter
804
+ ADD_COMMENT (cb , "guard embedded setivar" );
805
+ test (cb , flags_opnd , imm_opnd (ROBJECT_EMBED ));
806
+ jit_chain_guard (JCC_JZ , jit , & starting_context , max_chain_depth , side_exit );
807
+
808
+ // Load the variable
809
+ x86opnd_t ivar_opnd = mem_opnd (64 , REG0 , offsetof(struct RObject , as .ary ) + ivar_index * SIZEOF_VALUE );
810
+
811
+ mov (cb , ivar_opnd , REG1 );
812
+
813
+ // Push the ivar on the stack
814
+ // For attr_writer we'll need to push the value on the stack
815
+ //x86opnd_t out_opnd = ctx_stack_push(ctx, TYPE_UNKNOWN);
816
+ }
817
+ else {
818
+ // Compile time value is *not* embeded.
819
+
820
+ // Guard that value is *not* embedded
821
+ // See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
822
+ ADD_COMMENT (cb , "guard extended setivar" );
823
+ x86opnd_t flags_opnd = member_opnd (REG0 , struct RBasic , flags );
824
+ test (cb , flags_opnd , imm_opnd (ROBJECT_EMBED ));
825
+ jit_chain_guard (JCC_JNZ , jit , & starting_context , max_chain_depth , side_exit );
826
+
827
+ // check that the extended table is big enough
828
+ if (ivar_index >= ROBJECT_EMBED_LEN_MAX + 1 ) {
829
+ // Check that the slot is inside the extended table (num_slots > index)
830
+ x86opnd_t num_slots = mem_opnd (32 , REG0 , offsetof(struct RObject , as .heap .numiv ));
831
+ cmp (cb , num_slots , imm_opnd (ivar_index ));
832
+ jle_ptr (cb , COUNTED_EXIT (side_exit , setivar_idx_out_of_range ));
833
+ }
834
+
835
+ // Get a pointer to the extended table
836
+ x86opnd_t tbl_opnd = mem_opnd (64 , REG0 , offsetof(struct RObject , as .heap .ivptr ));
837
+ mov (cb , REG0 , tbl_opnd );
838
+
839
+ // Write the ivar to the extended table
840
+ x86opnd_t ivar_opnd = mem_opnd (64 , REG0 , sizeof (VALUE ) * ivar_index );
841
+ mov (cb , REG1 , val_to_write );
842
+ mov (cb , ivar_opnd , REG1 );
843
+ }
844
+
845
+ // Jump to next instruction. This allows guard chains to share the same successor.
846
+ jit_jump_to_next_insn (jit , ctx );
847
+ return YJIT_END_BLOCK ;
848
+ }
849
+
850
+ GEN_COUNTER_INC (cb , setivar_name_not_mapped );
851
+ return YJIT_CANT_COMPILE ;
852
+ }
853
+
748
854
// Codegen for getting an instance variable.
749
855
// Preconditions:
750
856
// - receiver is in REG0
@@ -867,7 +973,7 @@ gen_getinstancevariable(jitstate_t *jit, ctx_t *ctx)
867
973
868
974
// Guard that the receiver has the same class as the one from compile time.
869
975
mov (cb , REG0 , member_opnd (REG_CFP , rb_control_frame_t , self ));
870
- guard_self_is_heap (cb , REG0 , side_exit , ctx );
976
+ guard_self_is_heap (cb , REG0 , COUNTED_EXIT ( side_exit , getivar_se_self_not_heap ) , ctx );
871
977
872
978
jit_guard_known_klass (jit , ctx , comptime_val_klass , OPND_SELF , GETIVAR_MAX_DEPTH , side_exit );
873
979
@@ -877,69 +983,27 @@ gen_getinstancevariable(jitstate_t *jit, ctx_t *ctx)
877
983
// Codegen for the setinstancevariable instruction.
// Specializes on the runtime class of `self`, then delegates the actual ivar
// write to gen_set_ivar with `self` as the receiver (OPND_SELF).
static codegen_status_t
gen_setinstancevariable(jitstate_t *jit, ctx_t *ctx)
{
    // Defer compilation so we can specialize on a runtime `self`
    if (!jit_at_current_insn(jit)) {
        defer_compilation(jit->block, jit->insn_idx, ctx);
        return YJIT_END_BLOCK;
    }

    // ID of the instance variable being written (first instruction operand)
    ID ivar_name = (ID)jit_get_arg(jit, 0);

    // Observe `self` at compile time; its class drives the specialization
    VALUE comptime_val = jit_peek_at_self(jit, ctx);
    VALUE comptime_val_klass = CLASS_OF(comptime_val);

    // Generate a side exit
    uint8_t *side_exit = yjit_side_exit(jit, ctx);

    // Guard that the receiver has the same class as the one from compile time.
    mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
    guard_self_is_heap(cb, REG0, COUNTED_EXIT(side_exit, setivar_se_self_not_heap), ctx);

    // NOTE(review): reuses GETIVAR_MAX_DEPTH for the setivar guard chain —
    // consider a dedicated SETIVAR_MAX_DEPTH constant if the depths should
    // ever be tuned independently.
    jit_guard_known_klass(jit, ctx, comptime_val_klass, OPND_SELF, GETIVAR_MAX_DEPTH, side_exit);

    return gen_set_ivar(jit, ctx, GETIVAR_MAX_DEPTH, comptime_val, ivar_name, OPND_SELF, side_exit);
}
944
1008
945
1009
static void
0 commit comments