diff --git a/common.gypi b/common.gypi index 686fc122383cb3..a2937460b5a897 100644 --- a/common.gypi +++ b/common.gypi @@ -38,7 +38,7 @@ # Reset this number to 0 on major V8 upgrades. # Increment by one for each non-official patch applied to deps/v8. - 'v8_embedder_string': '-node.18', + 'v8_embedder_string': '-node.20', ##### V8 defaults for Node.js ##### diff --git a/deps/v8/src/heap/base/asm/s390/push_registers_asm.cc b/deps/v8/src/heap/base/asm/s390/push_registers_asm.cc index ef954fa03ae8f2..80b6cf0664d627 100644 --- a/deps/v8/src/heap/base/asm/s390/push_registers_asm.cc +++ b/deps/v8/src/heap/base/asm/s390/push_registers_asm.cc @@ -21,17 +21,17 @@ asm(".text \n" "PushAllRegistersAndIterateStack: \n" // Push all callee-saved registers. // r6-r13, r14 and sp(r15) - " stmg %r6, %sp, 48(%sp) \n" + " stmg %r6, %r15, 48(%r15) \n" // Allocate frame. - " lay %sp, -160(%sp) \n" + " lay %r15, -160(%r15) \n" // Pass 1st parameter (r2) unchanged (Stack*). // Pass 2nd parameter (r3) unchanged (StackVisitor*). // Save 3rd parameter (r4; IterateStackCallback). " lgr %r5, %r4 \n" // Pass sp as 3rd parameter. 160+48 to point // to callee saved region stored above. - " lay %r4, 208(%sp) \n" + " lay %r4, 208(%r15) \n" // Call the callback. " basr %r14, %r5 \n" - " lmg %r14,%sp, 272(%sp) \n" + " lmg %r14,%r15, 272(%r15) \n" " br %r14 \n"); diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-inl.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-inl.h index 7ab51af2be9e68..2256229f868c56 100644 --- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-inl.h +++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc-inl.h @@ -2893,14 +2893,38 @@ void LiftoffAssembler::CallC(const std::initializer_list<VarState> args, parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]}, arg); ++reg_args; } else { - int bias = 0; - // On BE machines values with less than 8 bytes are right justified. - // bias here is relative to the stack pointer.
- if (arg.kind() == kI32 || arg.kind() == kF32) bias = -stack_bias; int offset = (kStackFrameExtraParamSlot + stack_args) * kSystemPointerSize; - MemOperand dst{sp, offset + bias}; - liftoff::StoreToMemory(this, dst, arg, r0, ip); + MemOperand dst{sp, offset}; + Register scratch1 = r0; + Register scratch2 = ip; + if (arg.is_reg()) { + switch (arg.kind()) { + case kI16: + extsh(scratch1, arg.reg().gp()); + StoreU64(scratch1, dst); + break; + case kI32: + extsw(scratch1, arg.reg().gp()); + StoreU64(scratch1, dst); + break; + case kI64: + StoreU64(arg.reg().gp(), dst); + break; + default: + UNREACHABLE(); + } + } else if (arg.is_const()) { + mov(scratch1, Operand(static_cast<int64_t>(arg.i32_const()))); + StoreU64(scratch1, dst); + } else if (value_kind_size(arg.kind()) == 4) { + LoadS32(scratch1, liftoff::GetStackSlot(arg.offset()), scratch2); + StoreU64(scratch1, dst); + } else { + DCHECK_EQ(8, value_kind_size(arg.kind())); + LoadU64(scratch1, liftoff::GetStackSlot(arg.offset()), scratch1); + StoreU64(scratch1, dst); + } ++stack_args; } } diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-inl.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-inl.h index bfd1eaf61e69f0..0be7b0e880a543 100644 --- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-inl.h +++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390-inl.h @@ -3271,14 +3271,37 @@ void LiftoffAssembler::CallC(const std::initializer_list<VarState> args, parallel_move.LoadIntoRegister(LiftoffRegister{kCArgRegs[reg_args]}, arg); ++reg_args; } else { - int bias = 0; - // On BE machines values with less than 8 bytes are right justified. - // bias here is relative to the stack pointer.
- if (arg.kind() == kI32 || arg.kind() == kF32) bias = -stack_bias; int offset = (kStackFrameExtraParamSlot + stack_args) * kSystemPointerSize; - MemOperand dst{sp, offset + bias}; - liftoff::StoreToMemory(this, dst, arg, ip); + MemOperand dst{sp, offset}; + Register scratch = ip; + if (arg.is_reg()) { + switch (arg.kind()) { + case kI16: + LoadS16(scratch, arg.reg().gp()); + StoreU64(scratch, dst); + break; + case kI32: + LoadS32(scratch, arg.reg().gp()); + StoreU64(scratch, dst); + break; + case kI64: + StoreU64(arg.reg().gp(), dst); + break; + default: + UNREACHABLE(); + } + } else if (arg.is_const()) { + mov(scratch, Operand(static_cast<int64_t>(arg.i32_const()))); + StoreU64(scratch, dst); + } else if (value_kind_size(arg.kind()) == 4) { + LoadS32(scratch, liftoff::GetStackSlot(arg.offset()), scratch); + StoreU64(scratch, dst); + } else { + DCHECK_EQ(8, value_kind_size(arg.kind())); + LoadU64(scratch, liftoff::GetStackSlot(arg.offset()), scratch); + StoreU64(scratch, dst); + } ++stack_args; } }