@@ -9274,6 +9274,61 @@ void Lowerer::LowerLdLen(IR::Instr *const instr, const bool isHelper)
     LowerLdFld(instr, IR::HelperOp_GetProperty, IR::HelperOp_GetProperty, false, nullptr, isHelper);
 }
 
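+// Emits the move for an asm.js/WASM array view load or store. When poisoning is
+// enabled, derives a branchless mask from the bounds check (all ones when the
+// access is in bounds, zero when it is not) and ANDs it into either the index
+// before the access or the loaded value afterwards, so a speculatively executed
+// out-of-bounds access cannot leak data.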
+IR::Instr* InsertMaskableMove(bool isStore, bool generateWriteBarrier, IR::Opnd* dst, IR::Opnd* src1, IR::Opnd* src2, IR::Opnd* indexOpnd, IR::Instr* insertBeforeInstr, Lowerer* lowerer)
+{
+    Assert(insertBeforeInstr->m_func->GetJITFunctionBody()->IsAsmJsMode());
+
+    // Mask with the bounds check operand to avoid speculation issues
+    const bool usesFastArray = insertBeforeInstr->m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer();
+    IR::RegOpnd* mask = nullptr;
+    bool shouldMaskResult = false;
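+    // With the fast virtual buffer the entire 4GiB index space is reserved, so even
+    // a speculative out-of-bounds access stays inside the reservation; no poisoning
+    // is needed in that mode.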
+    if (!usesFastArray)
+    {
+        bool shouldMask = isStore ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore) : CONFIG_FLAG_RELEASE(PoisonTypedArrayLoad);
+        if (shouldMask && indexOpnd != nullptr)
+        {
+            // Indices in asm.js fit in 32 bits; compute the offset of the last byte
+            // the access touches (index + accessSize - 1) for the bounds comparison below.
+            IR::RegOpnd* temp = IR::RegOpnd::New(indexOpnd->GetType(), insertBeforeInstr->m_func);
+            lowerer->InsertMove(temp, indexOpnd, insertBeforeInstr, false);
+            lowerer->InsertAdd(false, temp, temp, IR::IntConstOpnd::New((uint32)src1->GetSize() - 1, temp->GetType(), insertBeforeInstr->m_func, true), insertBeforeInstr);
+
+            // For native ints and vars, we do the masking after the load; we don't do this for
+            // floats and doubles because the conversion to and from fp regs is slow.
+            shouldMaskResult = (!isStore) && IRType_IsNativeIntOrVar(src1->GetType()) && TySize[dst->GetType()] <= TySize[TyMachReg];
+
+            // When we do post-load masking, we AND the mask with dst, so they need to have the
+            // same type, as otherwise we'll hit asserts later on. When we do pre-load masking,
+            // we AND the mask with the index component of the indir opnd for the move from the
+            // array, so we need to align with that type instead.
+            mask = IR::RegOpnd::New((shouldMaskResult ? dst : indexOpnd)->GetType(), insertBeforeInstr->m_func);
+
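+            // On 64-bit targets the mask can be pointer-width while the index and
+            // length operands are 32-bit; widen them so the sub/shift below operate
+            // on matching types.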
+            if (temp->GetSize() != mask->GetSize())
+            {
+                Assert(mask->GetSize() == MachPtr);
+                Assert(src2->GetType() == TyUint32);
+                temp = temp->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd();
+                src2 = src2->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd();
+            }
+
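+            // mask = (lastByteOffset - length) >> (width - 1): the arithmetic shift
+            // smears the sign bit, so the mask is all ones when the access is in
+            // bounds (negative difference) and zero when it is out of bounds.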
+            lowerer->InsertSub(false, mask, temp, src2, insertBeforeInstr);
+            lowerer->InsertShift(Js::OpCode::Shr_A, false, mask, mask, IR::IntConstOpnd::New(TySize[mask->GetType()] * 8 - 1, TyInt8, insertBeforeInstr->m_func), insertBeforeInstr);
+
+            // If we're not masking the result, we're masking the index
+            if (!shouldMaskResult)
+            {
+                lowerer->InsertAnd(indexOpnd, indexOpnd, mask, insertBeforeInstr);
+            }
+        }
+    }
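+    // Emit the actual array access; when post-load masking was chosen, the
+    // loaded value is poisoned below.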
+    IR::Instr* ret = lowerer->InsertMove(dst, src1, insertBeforeInstr, generateWriteBarrier);
+    if (!usesFastArray && shouldMaskResult)
+    {
+        // Mask the result if we didn't use the mask earlier to mask the index
+        lowerer->InsertAnd(dst, dst, mask, insertBeforeInstr);
+    }
+    return ret;
+}
+
 
 IR::Instr *
 Lowerer::LowerLdArrViewElem(IR::Instr * instr)
 {
@@ -9342,7 +9397,8 @@ Lowerer::LowerLdArrViewElem(IR::Instr * instr)
         }
         done = instr;
     }
-    InsertMove(dst, src1, done);
+
+    InsertMaskableMove(false, true, dst, src1, src2, indexOpnd, done, this);
 
     instr->Remove();
     return instrPrev;
@@ -9390,7 +9446,8 @@ Lowerer::LowerLdArrViewElemWasm(IR::Instr * instr)
     Assert(!dst->IsFloat64() || src1->IsFloat64());
 
     IR::Instr * done = LowerWasmArrayBoundsCheck(instr, src1);
-    IR::Instr* newMove = InsertMove(dst, src1, done);
+
+    IR::Instr* newMove = InsertMaskableMove(false, true, dst, src1, instr->GetSrc2(), src1->AsIndirOpnd()->GetIndexOpnd(), done, this);
 
     if (m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer())
     {
@@ -9667,8 +9724,8 @@ Lowerer::LowerStArrViewElem(IR::Instr * instr)
             instr->FreeSrc2();
         }
     }
-    // wasm memory buffer is not recycler allocated, so we shouldn't generate write barrier
-    InsertMove( dst, src1, done, false );
+    // wasm memory buffer is not recycler allocated, so we shouldn't generate write barrier
+    InsertMaskableMove(true, false, dst, src1, src2, indexOpnd, done, this );
 
     instr->Remove();
     return instrPrev;
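
For readers following the masking logic, the pattern InsertMaskableMove emits roughly corresponds to the scalar sketch below (illustrative only: MaskedLoad and its parameters are hypothetical names, not ChakraCore APIs, and the real helper applies either the index mask or the result mask, not both):

#include <cstdint>
#include <cstring>

// Branchless bounds poisoning, assuming len >= sizeof(uint32_t) and a compiler
// that implements >> on negative values as an arithmetic shift.
uint32_t MaskedLoad(const uint8_t* buf, uintptr_t len, uintptr_t index)
{
    uintptr_t lastByte = index + sizeof(uint32_t) - 1;   // temp = index + accessSize - 1
    // (lastByte - len) wraps negative when in bounds; shifting by width-1 smears
    // the sign bit into an all-ones (in bounds) or all-zeros (out of bounds) mask.
    uintptr_t mask = (uintptr_t)((intptr_t)(lastByte - len) >> (sizeof(uintptr_t) * 8 - 1));
    index &= mask;                       // pre-access poisoning: OOB index collapses to 0
    uint32_t value;
    std::memcpy(&value, buf + index, sizeof(value));
    return value & (uint32_t)mask;       // post-load poisoning: OOB result becomes 0
}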