Skip to content

Commit 902d1c7

Browse files
Merge in jdk-23.0.2+4 (24.1)
PullRequest: labsjdk-ce/125
2 parents a335b49 + e087d05 commit 902d1c7

File tree

28 files changed

+678
-166
lines changed

28 files changed

+678
-166
lines changed

src/hotspot/cpu/aarch64/nativeInst_aarch64.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ address NativeCall::destination() const {
7878
//
7979
// Used in the runtime linkage of calls; see class CompiledIC.
8080
void NativeCall::set_destination_mt_safe(address dest) {
81-
assert((Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
81+
assert((CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
8282
CompiledICLocker::is_safe(addr_at(0)),
8383
"concurrent code patching");
8484

src/hotspot/cpu/ppc/c1_LIRAssembler_ppc.cpp

Lines changed: 19 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -133,9 +133,20 @@ void LIR_Assembler::osr_entry() {
133133
// copied into place by code emitted in the IR.
134134

135135
Register OSR_buf = osrBufferPointer()->as_register();
136-
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
137-
int monitor_offset = BytesPerWord * method()->max_locals() +
138-
(2 * BytesPerWord) * (number_of_locks - 1);
136+
{
137+
assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
138+
139+
const int locals_space = BytesPerWord * method()->max_locals();
140+
int monitor_offset = locals_space + (2 * BytesPerWord) * (number_of_locks - 1);
141+
bool use_OSR_bias = false;
142+
143+
if (!Assembler::is_simm16(monitor_offset + BytesPerWord) && number_of_locks > 0) {
144+
// Offsets too large for ld instructions. Use bias.
145+
__ add_const_optimized(OSR_buf, OSR_buf, locals_space);
146+
monitor_offset -= locals_space;
147+
use_OSR_bias = true;
148+
}
149+
139150
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
140151
// the OSR buffer using 2 word entries: first the lock and then
141152
// the oop.
@@ -161,6 +172,11 @@ void LIR_Assembler::osr_entry() {
161172
__ ld(R0, slot_offset + 1*BytesPerWord, OSR_buf);
162173
__ std(R0, mo.disp(), mo.base());
163174
}
175+
176+
if (use_OSR_bias) {
177+
// Restore.
178+
__ sub_const_optimized(OSR_buf, OSR_buf, locals_space);
179+
}
164180
}
165181
}
166182

src/hotspot/cpu/ppc/nativeInst_ppc.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -92,10 +92,10 @@ address NativeCall::destination() const {
9292
// Used in the runtime linkage of calls; see class CompiledIC.
9393
//
9494
// Add parameter assert_lock to switch off assertion
95-
// during code generation, where no patching lock is needed.
95+
// during code generation, where no lock is needed.
9696
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
9797
assert(!assert_lock ||
98-
(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
98+
(CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
9999
CompiledICLocker::is_safe(addr_at(0)),
100100
"concurrent code patching");
101101

src/hotspot/cpu/ppc/ppc.ad

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3427,6 +3427,7 @@ encode %{
34273427
call->_oop_map = _oop_map;
34283428
call->_jvms = _jvms;
34293429
call->_jvmadj = _jvmadj;
3430+
call->_has_ea_local_in_scope = _has_ea_local_in_scope;
34303431
call->_in_rms = _in_rms;
34313432
call->_nesting = _nesting;
34323433
call->_override_symbolic_info = _override_symbolic_info;

src/hotspot/cpu/riscv/nativeInst_riscv.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,10 +68,10 @@ address NativeCall::destination() const {
6868
// Used in the runtime linkage of calls; see class CompiledIC.
6969
//
7070
// Add parameter assert_lock to switch off assertion
71-
// during code generation, where no patching lock is needed.
71+
// during code generation, where no lock is needed.
7272
void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
7373
assert(!assert_lock ||
74-
(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
74+
(CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
7575
CompiledICLocker::is_safe(addr_at(0)),
7676
"concurrent code patching");
7777

src/hotspot/cpu/s390/nativeInst_s390.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -658,8 +658,8 @@ void NativeGeneralJump::insert_unconditional(address code_pos, address entry) {
658658

659659
void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer) {
660660
assert(((intptr_t)instr_addr & (BytesPerWord-1)) == 0, "requirement for mt safe patching");
661-
// Bytes_after_jump cannot change, because we own the Patching_lock.
662-
assert(Patching_lock->owned_by_self(), "must hold lock to patch instruction");
661+
// Bytes_after_jump cannot change, because we own the CodeCache_lock.
662+
assert(CodeCache_lock->owned_by_self(), "must hold lock to patch instruction");
663663
intptr_t bytes_after_jump = (*(intptr_t*)instr_addr) & 0x000000000000ffffL; // 2 bytes after jump.
664664
intptr_t load_const_bytes = (*(intptr_t*)code_buffer) & 0xffffffffffff0000L;
665665
*(intptr_t*)instr_addr = load_const_bytes | bytes_after_jump;

src/hotspot/cpu/x86/nativeInst_x86.cpp

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -218,7 +218,7 @@ void NativeCall::insert(address code_pos, address entry) {
218218
// (spinlock). Then patches the last byte, and then atomically replaces
219219
// the jmp's with the first 4 byte of the new instruction.
220220
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
221-
assert(Patching_lock->is_locked() ||
221+
assert(CodeCache_lock->is_locked() ||
222222
SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
223223
assert (instr_addr != nullptr, "illegal address for code patching");
224224

@@ -281,7 +281,7 @@ void NativeCall::set_destination_mt_safe(address dest) {
281281
debug_only(verify());
282282
// Make sure patching code is locked. No two threads can patch at the same
283283
// time but one may be executing this code.
284-
assert(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint() ||
284+
assert(CodeCache_lock->is_locked() || SafepointSynchronize::is_at_safepoint() ||
285285
CompiledICLocker::is_safe(instruction_address()), "concurrent code patching");
286286
// Both C1 and C2 should now be generating code which aligns the patched address
287287
// to be within a single cache line.

src/hotspot/share/c1/c1_Runtime1.cpp

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -880,7 +880,7 @@ static Klass* resolve_field_return_klass(const methodHandle& caller, int bci, TR
880880
// movl reg, [reg1 + <const>] (for field offsets)
881881
// jmp continue
882882
// <being_init offset> <bytes to copy> <bytes to skip>
883-
// patch_stub: jmp Runtim1::patch_code (through a runtime stub)
883+
// patch_stub: jmp Runtime1::patch_code (through a runtime stub)
884884
// jmp patch_site
885885
//
886886
// If the class is being initialized the patch body is rewritten and
@@ -1096,7 +1096,7 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
10961096
// Now copy code back
10971097

10981098
{
1099-
MutexLocker ml_patch (current, Patching_lock, Mutex::_no_safepoint_check_flag);
1099+
MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
11001100
//
11011101
// Deoptimization may have happened while we waited for the lock.
11021102
// In that case we don't bother to do any patching we just return
@@ -1261,12 +1261,8 @@ JRT_ENTRY(void, Runtime1::patch_code(JavaThread* current, Runtime1::StubID stub_
12611261
}
12621262
}
12631263
}
1264-
}
1265-
1266-
// If we are patching in a non-perm oop, make sure the nmethod
1267-
// is on the right list.
1268-
{
1269-
MutexLocker ml_code (current, CodeCache_lock, Mutex::_no_safepoint_check_flag);
1264+
// If we are patching in a non-perm oop, make sure the nmethod
1265+
// is on the right list.
12701266
nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
12711267
guarantee(nm != nullptr, "only nmethods can contain non-perm oops");
12721268

src/hotspot/share/code/nmethod.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2052,7 +2052,7 @@ bool nmethod::make_not_entrant() {
20522052
} // leave critical region under NMethodState_lock
20532053

20542054
#if INCLUDE_JVMCI
2055-
// Invalidate can't occur while holding the Patching lock
2055+
// Invalidate can't occur while holding the NMethodState_lock
20562056
JVMCINMethodData* nmethod_data = jvmci_nmethod_data();
20572057
if (nmethod_data != nullptr) {
20582058
nmethod_data->invalidate_nmethod_mirror(this);

src/hotspot/share/gc/shenandoah/c2/shenandoahSupport.cpp

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1060,6 +1060,7 @@ void ShenandoahBarrierC2Support::fix_ctrl(Node* barrier, Node* region, const Mem
10601060
Node* u = ctrl->fast_out(i);
10611061
if (u->_idx < last &&
10621062
u != barrier &&
1063+
!u->depends_only_on_test() && // preserve dependency on test
10631064
!uses_to_ignore.member(u) &&
10641065
(u->in(0) != ctrl || (!u->is_Region() && !u->is_Phi())) &&
10651066
(ctrl->Opcode() != Op_CatchProj || u->Opcode() != Op_CreateEx)) {

0 commit comments

Comments (0)