@@ -177,18 +177,16 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len,
177177}
178178
179179int C1_MacroAssembler::lock_object (Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
180- Label done, fast_lock, fast_lock_done;
181180 int null_check_offset = 0 ;
182181
183182 const Register tmp2 = Rtemp; // Rtemp should be free at c1 LIR level
184183 assert_different_registers (hdr, obj, disp_hdr, tmp2);
185184
186185 assert (BasicObjectLock::lock_offset () == 0 , " adjust this code" );
187- const ByteSize obj_offset = BasicObjectLock::obj_offset ();
188- const int mark_offset = BasicLock::displaced_header_offset_in_bytes ();
186+ assert (oopDesc::mark_offset_in_bytes () == 0 , " Required by atomic instructions" );
189187
190188 // save object being locked into the BasicObjectLock
191- str (obj, Address (disp_hdr, obj_offset));
189+ str (obj, Address (disp_hdr, BasicObjectLock:: obj_offset() ));
192190
193191 null_check_offset = offset ();
194192
@@ -199,95 +197,29 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
199197 b (slow_case, ne);
200198 }
201199
202- assert (oopDesc::mark_offset_in_bytes () == 0 , " Required by atomic instructions" );
203-
204- if (LockingMode == LM_LIGHTWEIGHT) {
205-
206- Register t1 = disp_hdr; // Needs saving, probably
207- Register t2 = hdr; // blow
208- Register t3 = Rtemp; // blow
209-
210- lightweight_lock (obj /* obj */ , t1, t2, t3, 1 /* savemask - save t1 */ , slow_case);
211- // Success: fall through
212-
213- } else if (LockingMode == LM_LEGACY) {
214-
215- // On MP platforms the next load could return a 'stale' value if the memory location has been modified by another thread.
216- // That would be acceptable as ether CAS or slow case path is taken in that case.
217-
218- // Must be the first instruction here, because implicit null check relies on it
219- ldr (hdr, Address (obj, oopDesc::mark_offset_in_bytes ()));
220-
221- tst (hdr, markWord::unlocked_value);
222- b (fast_lock, ne);
223-
224- // Check for recursive locking
225- // See comments in InterpreterMacroAssembler::lock_object for
226- // explanations on the fast recursive locking check.
227- // -1- test low 2 bits
228- movs (tmp2, AsmOperand (hdr, lsl, 30 ));
229- // -2- test (hdr - SP) if the low two bits are 0
230- sub (tmp2, hdr, SP, eq);
231- movs (tmp2, AsmOperand (tmp2, lsr, exact_log2 (os::vm_page_size ())), eq);
232- // If still 'eq' then recursive locking OK
233- // set to zero if recursive lock, set to non zero otherwise (see discussion in JDK-8267042)
234- str (tmp2, Address (disp_hdr, mark_offset));
235- b (fast_lock_done, eq);
236- // else need slow case
237- b (slow_case);
238-
239-
240- bind (fast_lock);
241- // Save previous object header in BasicLock structure and update the header
242- str (hdr, Address (disp_hdr, mark_offset));
243-
244- cas_for_lock_acquire (hdr, disp_hdr, obj, tmp2, slow_case);
245-
246- bind (fast_lock_done);
247- }
248- bind (done);
200+ Register t1 = disp_hdr; // Needs saving, probably
201+ Register t2 = hdr; // blow
202+ Register t3 = Rtemp; // blow
249203
204+ lightweight_lock (obj, t1, t2, t3, 1 /* savemask - save t1 */ , slow_case);
205+ // Success: fall through
250206 return null_check_offset;
251207}
252208
253209void C1_MacroAssembler::unlock_object (Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
254210 assert_different_registers (hdr, obj, disp_hdr, Rtemp);
255- Register tmp2 = Rtemp;
256211
257212 assert (BasicObjectLock::lock_offset () == 0 , " adjust this code" );
258- const ByteSize obj_offset = BasicObjectLock::obj_offset ();
259- const int mark_offset = BasicLock::displaced_header_offset_in_bytes ();
260-
261- Label done;
262-
263213 assert (oopDesc::mark_offset_in_bytes () == 0 , " Required by atomic instructions" );
264214
265- if (LockingMode == LM_LIGHTWEIGHT) {
215+ ldr (obj, Address (disp_hdr, BasicObjectLock::obj_offset ()));
266216
267- ldr (obj, Address (disp_hdr, obj_offset));
217+ Register t1 = disp_hdr; // Needs saving, probably
218+ Register t2 = hdr; // blow
219+ Register t3 = Rtemp; // blow
268220
269- Register t1 = disp_hdr; // Needs saving, probably
270- Register t2 = hdr; // blow
271- Register t3 = Rtemp; // blow
272-
273- lightweight_unlock (obj /* object */ , t1, t2, t3, 1 /* savemask (save t1) */ ,
274- slow_case);
275- // Success: Fall through
276-
277- } else if (LockingMode == LM_LEGACY) {
278-
279- // Load displaced header and object from the lock
280- ldr (hdr, Address (disp_hdr, mark_offset));
281- // If hdr is null, we've got recursive locking and there's nothing more to do
282- cbz (hdr, done);
283-
284- // load object
285- ldr (obj, Address (disp_hdr, obj_offset));
286-
287- // Restore the object header
288- cas_for_lock_release (disp_hdr, hdr, obj, tmp2, slow_case);
289- }
290- bind (done);
221+ lightweight_unlock (obj, t1, t2, t3, 1 /* savemask - save t1 */ , slow_case);
222+ // Success: fall through
291223}
292224
293225#ifndef PRODUCT