diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
index 9e69c913a7b..a1a76fa5eec 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp
@@ -1892,3 +1892,15 @@ void InterpreterMacroAssembler::load_resolved_indy_entry(Register cache, Registe
   add(cache, cache, Array<ResolvedIndyEntry>::base_offset_in_bytes());
   lea(cache, Address(cache, index));
 }
+
+#ifdef ASSERT
+void InterpreterMacroAssembler::verify_field_offset(Register reg) {
+  // Verify the field offset is not in the header, implicitly checks for 0
+  Label L;
+  subs(zr, reg, static_cast<int64_t>(sizeof(markWord) + (UseCompressedClassPointers ? sizeof(narrowKlass) : sizeof(Klass*))));
+  br(Assembler::GE, L);
+  stop("bad field offset");
+  bind(L);
+}
+#endif
+
diff --git a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
index 019eb235581..401e107bcd5 100644
--- a/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
+++ b/src/hotspot/cpu/aarch64/interp_masm_aarch64.hpp
@@ -321,6 +321,8 @@ class InterpreterMacroAssembler: public MacroAssembler {
   }
 
   void load_resolved_indy_entry(Register cache, Register index);
+
+  void verify_field_offset(Register reg) NOT_DEBUG_RETURN;
 };
 
 #endif // CPU_AARCH64_INTERP_MASM_AARCH64_HPP
diff --git a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
index 240190ab692..b034fc31482 100644
--- a/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
+++ b/src/hotspot/cpu/aarch64/templateTable_aarch64.cpp
@@ -166,6 +166,7 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                    int byte_no)
 {
+  assert_different_registers(bc_reg, temp_reg);
   if (!RewriteBytecodes)  return;
   Label L_patch_done;
 
@@ -223,8 +224,12 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
   __ bind(L_okay);
 #endif
 
-  // patch bytecode
-  __ strb(bc_reg, at_bcp(0));
+  // Patch the bytecode with a release store to coordinate with ConstantPoolCacheEntry
+  // loads in the fast bytecode codelets. The fast codelets have a memory barrier that,
+  // together with the control dependency on entering the fast codelet itself, provides
+  // the needed ordering.
+  __ lea(temp_reg, at_bcp(0));
+  __ stlrb(bc_reg, temp_reg);
   __ bind(L_patch_done);
 }
 
@@ -2982,6 +2987,7 @@ void TemplateTable::fast_storefield(TosState state)
 
   // replace index with field offset from cache entry
   __ ldr(r1, Address(r2, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);
 
   {
     Label notVolatile;
@@ -3075,6 +3081,8 @@ void TemplateTable::fast_accessfield(TosState state)
 
   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);
+
   __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                    ConstantPoolCacheEntry::flags_offset())));
 
@@ -3142,8 +3150,13 @@ void TemplateTable::fast_xaccess(TosState state)
   __ ldr(r0, aaddress(0));
   // access constant pool cache
   __ get_cache_and_index_at_bcp(r2, r3, 2);
+
+  // Must prevent reordering of the following cp cache loads with the bytecode load
+  __ membar(MacroAssembler::LoadLoad);
+
   __ ldr(r1, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                   ConstantPoolCacheEntry::f2_offset())));
+  __ verify_field_offset(r1);
 
   // 8179954: We need to make sure that the code generated for
   // volatile accesses forms a sequentially-consistent set of
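
For readers outside HotSpot, here is a minimal sketch of the ordering idiom the patch relies on, written with standard C++ atomics rather than the real interpreter machinery. The names (rewrite, dispatch_and_load, the two atomics) are illustrative stand-ins, not HotSpot APIs: the writer publishes the rewritten opcode with a release store (stlrb above), and the reader orders its cp cache loads after the opcode load (the membar(LoadLoad) above, modeled here as an acquire load).

#include <atomic>
#include <cstdint>

std::atomic<uint8_t>  bytecode{0};         // stands in for the byte at bcp
std::atomic<uint64_t> f2_field_offset{0};  // stands in for the cp cache f2 word

// Writer side, analogous to patch_bytecode: fill in the cache entry first,
// then publish the fast opcode with a release store (stlrb on AArch64).
void rewrite(uint8_t fast_opcode, uint64_t offset) {
  f2_field_offset.store(offset, std::memory_order_relaxed);
  bytecode.store(fast_opcode, std::memory_order_release);
}

// Reader side, analogous to a fast bytecode codelet: the acquire load of the
// opcode keeps the following cp cache load from floating above it, which is
// what the explicit membar(LoadLoad) achieves in fast_xaccess.
uint64_t dispatch_and_load() {
  uint8_t op = bytecode.load(std::memory_order_acquire);
  (void)op;  // dispatch would branch on op; only the fast codelet runs this load
  return f2_field_offset.load(std::memory_order_relaxed);
}

Without the release/ordered-load pairing, a thread could observe the fast opcode while still seeing a stale (e.g. zero) f2 word, which is exactly the window verify_field_offset is meant to catch in debug builds.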
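And a small standalone sketch of the bound verify_field_offset compares against, under assumed sizes (8-byte markWord, 4-byte narrowKlass, 8-byte Klass*; the constant names are made up for the example): a valid instance field offset must point past the object header, so checking against the header size also implicitly rejects an unresolved (zero) offset.

#include <cstddef>
#include <cstdio>

// Assumed sizes, mirroring sizeof(markWord), sizeof(narrowKlass), sizeof(Klass*).
constexpr size_t kMarkWordSize    = 8;
constexpr size_t kNarrowKlassSize = 4;
constexpr size_t kKlassPtrSize    = 8;

// Smallest legal instance field offset: the first byte after the header.
constexpr size_t min_field_offset(bool use_compressed_class_pointers) {
  return kMarkWordSize +
         (use_compressed_class_pointers ? kNarrowKlassSize : kKlassPtrSize);
}

int main() {
  // 12 with compressed class pointers, 16 without; offset 0 always fails.
  std::printf("%zu %zu\n", min_field_offset(true), min_field_offset(false));
  return 0;
}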