Skip to content

Commit 5192cdc

Browse files
Penguinwizzard and Mike Holman
authored and committed
[CVE-2018-8354] Array guards needed for asmjs on x86
1 parent e03b3e3 commit 5192cdc

File tree

1 file changed

+61
-4
lines changed

1 file changed

+61
-4
lines changed

lib/Backend/Lower.cpp

Lines changed: 61 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9274,6 +9274,61 @@ void Lowerer::LowerLdLen(IR::Instr *const instr, const bool isHelper)
92749274
LowerLdFld(instr, IR::HelperOp_GetProperty, IR::HelperOp_GetProperty, false, nullptr, isHelper);
92759275
}
92769276

9277+
// Emits the actual array-element move for an asm.js/wasm typed-array access,
// optionally poisoning (masking) either the index or the loaded value so that a
// speculatively-executed out-of-bounds access cannot leak data (bounds-check-bypass
// / Spectre v1 mitigation; see CVE-2018-8354).
//
//   isStore              - true for StArrViewElem lowering, false for loads; selects
//                          which Poison* release flag gates the mitigation.
//   generateWriteBarrier - forwarded to InsertMove (callers pass false for wasm
//                          memory, which is not recycler-allocated).
//   dst / src1           - destination and source operands of the element move
//                          (one of them is the array indir).
//   src2                 - the bounds-check operand (array length/size in bytes);
//                          used to compute the poison mask.
//   indexOpnd            - the index component of the array indir; nullptr when the
//                          access uses a constant offset, in which case no masking
//                          is done.
//   insertBeforeInstr    - all generated instructions are inserted before this one.
//   lowerer              - Lowerer used to emit the instructions.
//
// Returns the move instruction produced by InsertMove.
IR::Instr* InsertMaskableMove(bool isStore, bool generateWriteBarrier, IR::Opnd* dst, IR::Opnd* src1, IR::Opnd* src2, IR::Opnd* indexOpnd, IR::Instr* insertBeforeInstr, Lowerer* lowerer)
{
    // Only valid in asm.js/wasm mode, where array accesses are bounds-checked.
    Assert(insertBeforeInstr->m_func->GetJITFunctionBody()->IsAsmJsMode());

    // Mask with the bounds check operand to avoid speculation issues
    // Fast virtual buffers rely on guard pages instead of explicit bounds checks,
    // so there is no bounds-check operand to mask with and masking is skipped.
    const bool usesFastArray = insertBeforeInstr->m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer();
    IR::RegOpnd* mask = nullptr;
    bool shouldMaskResult = false;
    if (!usesFastArray)
    {
        bool shouldMask = isStore ? CONFIG_FLAG_RELEASE(PoisonTypedArrayStore) : CONFIG_FLAG_RELEASE(PoisonTypedArrayLoad);
        if (shouldMask && indexOpnd != nullptr)
        {
            // indices in asmjs fit in 32 bits, but we need a mask
            // temp = index + (elementSize - 1): the byte offset of the LAST byte the
            // access touches, so the in-bounds test below covers the whole element.
            IR::RegOpnd* temp = IR::RegOpnd::New(indexOpnd->GetType(), insertBeforeInstr->m_func);
            lowerer->InsertMove(temp, indexOpnd, insertBeforeInstr, false);
            lowerer->InsertAdd(false, temp, temp, IR::IntConstOpnd::New((uint32)src1->GetSize() - 1, temp->GetType(), insertBeforeInstr->m_func, true), insertBeforeInstr);

            // For native ints and vars, we do the masking after the load; we don't do this for
            // floats and doubles because the conversion to and from fp regs is slow.
            shouldMaskResult = (!isStore) && IRType_IsNativeIntOrVar(src1->GetType()) && TySize[dst->GetType()] <= TySize[TyMachReg];

            // When we do post-load masking, we AND the mask with dst, so they need to have the
            // same type, as otherwise we'll hit asserts later on. When we do pre-load masking,
            // we AND the mask with the index component of the indir opnd for the move from the
            // array, so we need to align with that type instead.
            mask = IR::RegOpnd::New((shouldMaskResult ? dst : indexOpnd)->GetType(), insertBeforeInstr->m_func);

            // Widen temp/src2 to machine-pointer width when the mask is wider than the
            // 32-bit index math, so the subtraction below operates on matching sizes.
            if (temp->GetSize() != mask->GetSize())
            {
                Assert(mask->GetSize() == MachPtr);
                Assert(src2->GetType() == TyUint32);
                temp = temp->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd();
                src2 = src2->UseWithNewType(TyMachPtr, insertBeforeInstr->m_func)->AsRegOpnd();
            }

            // mask = (temp - src2) >> (width - 1). Shr_A appears to be an arithmetic
            // shift here (sign-fill) -- NOTE(review): confirm against the IR opcode
            // definitions. Under that reading: in-bounds => temp - src2 is negative =>
            // mask is all ones (AND is a no-op); out-of-bounds => mask is zero, forcing
            // the index (or loaded value) to 0 on any speculative OOB path.
            lowerer->InsertSub(false, mask, temp, src2, insertBeforeInstr);
            lowerer->InsertShift(Js::OpCode::Shr_A, false, mask, mask, IR::IntConstOpnd::New(TySize[mask->GetType()] * 8 - 1, TyInt8, insertBeforeInstr->m_func), insertBeforeInstr);

            // If we're not masking the result, we're masking the index
            if (!shouldMaskResult)
            {
                lowerer->InsertAnd(indexOpnd, indexOpnd, mask, insertBeforeInstr);
            }
        }
    }
    // The actual element load/store. With pre-load masking, the index has already
    // been poisoned at this point.
    IR::Instr* ret = lowerer->InsertMove(dst, src1, insertBeforeInstr, generateWriteBarrier);
    if(!usesFastArray && shouldMaskResult)
    {
        // Mask the result if we didn't use the mask earlier to mask the index
        lowerer->InsertAnd(dst, dst, mask, insertBeforeInstr);
    }
    return ret;
}
9331+
92779332
IR::Instr *
92789333
Lowerer::LowerLdArrViewElem(IR::Instr * instr)
92799334
{
@@ -9342,7 +9397,8 @@ Lowerer::LowerLdArrViewElem(IR::Instr * instr)
93429397
}
93439398
done = instr;
93449399
}
9345-
InsertMove(dst, src1, done);
9400+
9401+
InsertMaskableMove(false, true, dst, src1, src2, indexOpnd, done, this);
93469402

93479403
instr->Remove();
93489404
return instrPrev;
@@ -9390,7 +9446,8 @@ Lowerer::LowerLdArrViewElemWasm(IR::Instr * instr)
93909446
Assert(!dst->IsFloat64() || src1->IsFloat64());
93919447

93929448
IR::Instr * done = LowerWasmArrayBoundsCheck(instr, src1);
9393-
IR::Instr* newMove = InsertMove(dst, src1, done);
9449+
9450+
IR::Instr* newMove = InsertMaskableMove(false, true, dst, src1, instr->GetSrc2(), src1->AsIndirOpnd()->GetIndexOpnd(), done, this);
93949451

93959452
if (m_func->GetJITFunctionBody()->UsesWAsmJsFastVirtualBuffer())
93969453
{
@@ -9667,8 +9724,8 @@ Lowerer::LowerStArrViewElem(IR::Instr * instr)
96679724
instr->FreeSrc2();
96689725
}
96699726
}
9670-
// wasm memory buffer is not recycler allocated, so we shouldn't generate write barrier
9671-
InsertMove(dst, src1, done, false);
9727+
// wasm memory buffer is not recycler allocated, so we shouldn't generate write barrier
9728+
InsertMaskableMove(true, false, dst, src1, src2, indexOpnd, done, this);
96729729

96739730
instr->Remove();
96749731
return instrPrev;

0 commit comments

Comments
 (0)