24 | 24 | */ |
25 | 25 |
26 | 26 | // Major contributions by AHa, AS, JL, ML. |
27 | | - |
| 27 | +long fubar = 0; // Temporary debug counter, incremented by code emitted in call_VM_preemptable. |
28 | 28 | #include "asm/macroAssembler.inline.hpp" |
29 | 29 | #include "gc/shared/barrierSet.hpp" |
30 | 30 | #include "gc/shared/barrierSetAssembler.hpp" |
@@ -182,10 +182,35 @@ void InterpreterMacroAssembler::call_VM_preemptable(Register oop_result, address |
182 | 182 | } |
183 | 183 | #endif // ASSERT |
184 | 184 |
| 185 | + Label resume_pc, not_preempted; |
| 186 | + push_cont_fastpath(); |
| 187 | + |
| 188 | + assert(arg_1 != Z_ARG1, "register corruption"); // Z_ARG1 is reserved for the thread argument. |
| 189 | + load_const_optimized(Z_R1, (uintptr_t)&fubar); // Debug aid: increment the global |
| 190 | + z_agsi(0, Z_R1, 1); // fubar counter on every invocation. |
| 191 | + z_lg(Z_ARG1, Address(Z_SP, _z_abi(callers_sp))); // Load caller's SP from the ABI back chain. |
| 192 | + |
| 193 | + // We set resume_pc as the last Java pc. It will be saved if the vthread gets preempted, |
| 194 | + // and execution will later continue right there. |
| 195 | + lgr_if_needed(Z_ARG2, arg_1); // 64-bit move: arg_1 goes in Z_ARG2 (Z_ARG1 carries the thread). |
| 196 | + |
| 197 | + call_VM(oop_result, entry_point, false /*check_exceptions*/, &resume_pc /* last_java_pc */); |
| 198 | + |
| 199 | + pop_cont_fastpath(); |
| 200 | + |
| 201 | + // If the call was preempted, the VM left an alternate return address; jump to it. |
| 202 | + z_lg(Z_R1_scratch, Address(Z_thread, in_bytes(JavaThread::preempt_alternate_return_offset()))); |
| 203 | + z_ltgr(Z_R1_scratch, Z_R1_scratch); // Set condition code: zero means not preempted. |
| 204 | + z_brz(not_preempted); |
| 205 | + |
| 206 | + z_mvghi(Address(Z_thread, in_bytes(JavaThread::preempt_alternate_return_offset())), 0); // Consume the alternate return address. |
| 207 | + z_br(Z_R1_scratch); // branch to handler in Z_R1_scratch |
185 | 208 |
| 209 | + bind(resume_pc); // Location to resume execution |
186 | 210 |
| 211 | + stop("restore_after_resume"); // Placeholder: the restore-after-resume path is not implemented yet. |
187 | 212 |
188 | | - stop("not yet implemented call_VM_preemptable"); |
| 213 | + bind(not_preempted); |
189 | 214 | } |
190 | 215 |
191 | 216 | void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point) { |
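
For readers who don't speak s390 macro assembler, here is a minimal C++ sketch of the protocol the added instructions implement. It is an illustration under stated assumptions, not HotSpot source: `FakeThread`, `after_call_VM`, and the function-pointer handler are hypothetical stand-ins for the JavaThread field behind `JavaThread::preempt_alternate_return_offset()` and the generated code paths. The idea: the VM reports preemption out of band through a per-thread field, which the generated code tests, clears, and branches through after the call returns.

```cpp
// Illustrative sketch only -- not HotSpot source. Models the control flow
// emitted by call_VM_preemptable after pop_cont_fastpath().
#include <cstdio>

struct FakeThread {
  // Stand-in for the slot behind JavaThread::preempt_alternate_return_offset().
  // The VM sets it while freezing a preempted vthread; otherwise it stays null.
  void (*preempt_alternate_return)(FakeThread*) = nullptr;
};

void handler(FakeThread*) { std::puts("preempted: continuing in handler"); }

void after_call_VM(FakeThread* thread) {
  // z_lg + z_ltgr + z_brz: load the field and test it for zero.
  if (auto alt = thread->preempt_alternate_return) {
    thread->preempt_alternate_return = nullptr; // z_mvghi(..., 0): consume it
    alt(thread);                                // z_br(Z_R1_scratch)
    return;
  }
  std::puts("not preempted: fall through");     // bind(not_preempted)
}

int main() {
  FakeThread t;
  after_call_VM(&t);                    // normal return from the VM call
  t.preempt_alternate_return = handler; // simulate a preempted VM call
  after_call_VM(&t);
}
```

The design keeps the non-preempted fast path cheap (one load plus a conditional branch), while passing `&resume_pc` as `last_java_pc` gives the freeze logic a concrete address at which the interpreter frame can later be resumed.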
|