21 changes: 4 additions & 17 deletions llvm/include/llvm/MC/MCAsmBackend.h
@@ -18,9 +18,7 @@

namespace llvm {

class MCAlignFragment;
class MCFragment;
class MCLEBFragment;
class MCSymbol;
class MCAssembler;
class MCContext;
@@ -105,21 +103,6 @@ class LLVM_ABI MCAsmBackend {
/// Get information on a fixup kind.
virtual MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const;

/// Hook to check if extra nop bytes must be inserted for alignment directive.
/// For some targets this may be necessary in order to support linker
/// relaxation. The number of bytes to insert are returned in Size.
virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
unsigned &Size) {
return false;
}

/// Hook which indicates if the target requires a fixup to be generated when
/// handling an align directive in an executable section
virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
MCAlignFragment &AF) {
return false;
}

// Evaluate a fixup, returning std::nullopt to use default handling for
// `Value` and `IsResolved`. Otherwise, returns `IsResolved` with the
// expectation that the hook updates `Value`.
@@ -177,6 +160,10 @@ class LLVM_ABI MCAsmBackend {
}

// Defined by linker relaxation targets.

// Return false to use default handling. Otherwise, set `Size` to the number
// of padding bytes.
virtual bool relaxAlign(MCFragment &F, unsigned &Size) { return false; }
virtual bool relaxDwarfLineAddr(MCFragment &, bool &WasRelaxed) const {
return false;
}
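For illustration, a minimal sketch of how a linker-relaxation target might implement the new `relaxAlign` hook in place of the two removed hooks. The names `MyTargetAsmBackend`, `STI`, and `FeatureRelax` are hypothetical and not part of this patch; a real target (e.g. RISC-V) would additionally attach its target-specific ALIGN fixup to the fragment.

// Sketch only: MyTargetAsmBackend, STI and FeatureRelax are illustrative names.
bool MyTargetAsmBackend::relaxAlign(MCFragment &F, unsigned &Size) {
  // Without linker relaxation, the generic nop padding computed in
  // MCAssembler::computeFragmentSize is sufficient.
  if (!STI.hasFeature(MyTarget::FeatureRelax))
    return false;
  // Reserve worst-case padding (alignment minus the smallest nop) so the
  // linker can later delete whatever padding turns out to be unnecessary.
  unsigned MinNop = getMinimumNopSize();
  if (F.getAlignment().value() <= MinNop)
    return false;
  Size = F.getAlignment().value() - MinNop;
  // A real backend would record its ALIGN fixup/relocation on `F` here so the
  // linker sees it; that detail is omitted from this sketch.
  return true;
}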
97 changes: 51 additions & 46 deletions llvm/include/llvm/MC/MCSection.h
@@ -254,6 +254,19 @@ class MCFragment {
uint32_t OperandStart;
uint32_t OperandSize;
} relax;
struct {
// The alignment to ensure, in bytes.
Align Alignment;
// The size of the integer (in bytes) of \p Fill.
uint8_t FillLen;
// If true, fill with target-specific nop instructions.
bool EmitNops;
// The maximum number of bytes to emit; if the alignment
// cannot be satisfied in this width then this fragment is ignored.
unsigned MaxBytesToEmit;
// Value to use for filling padding bytes.
int64_t Fill;
} align;
struct {
// True if this is a sleb128, false if uleb128.
bool IsSigned;
@@ -283,6 +296,7 @@ class MCFragment {
return false;
case MCFragment::FT_Relaxable:
case MCFragment::FT_Data:
case MCFragment::FT_Align:
case MCFragment::FT_Dwarf:
case MCFragment::FT_DwarfFrame:
case MCFragment::FT_LEB:
@@ -441,6 +455,43 @@ class MCFragment {
llvm::copy(Inst, S.begin() + u.relax.OperandStart);
}

//== FT_Align functions
void makeAlign(Align Alignment, int64_t Fill, uint8_t FillLen,
unsigned MaxBytesToEmit) {
Kind = FT_Align;
u.align.EmitNops = false;
u.align.Alignment = Alignment;
u.align.Fill = Fill;
u.align.FillLen = FillLen;
u.align.MaxBytesToEmit = MaxBytesToEmit;
}

Align getAlignment() const {
assert(Kind == FT_Align);
return u.align.Alignment;
}
int64_t getAlignFill() const {
assert(Kind == FT_Align);
return u.align.Fill;
}
uint8_t getAlignFillLen() const {
assert(Kind == FT_Align);
return u.align.FillLen;
}
unsigned getAlignMaxBytesToEmit() const {
assert(Kind == FT_Align);
return u.align.MaxBytesToEmit;
}
bool hasAlignEmitNops() const {
assert(Kind == FT_Align);
return u.align.EmitNops;
}
void setAlignEmitNops(bool Value, const MCSubtargetInfo *STI) {
assert(Kind == FT_Align);
u.align.EmitNops = Value;
this->STI = STI;
}

//== FT_LEB functions
const MCExpr &getLEBValue() const {
assert(Kind == FT_LEB);
@@ -486,52 +537,6 @@ class MCEncodedFragment : public MCFragment {
: MCFragment(FType, HasInstructions) {}
};

class MCAlignFragment : public MCFragment {
/// Flag to indicate that (optimal) NOPs should be emitted instead
/// of using the provided value. The exact interpretation of this flag is
/// target dependent.
bool EmitNops : 1;

/// The alignment to ensure, in bytes.
Align Alignment;

/// The size of the integer (in bytes) of \p Value.
uint8_t FillLen;

/// The maximum number of bytes to emit; if the alignment
/// cannot be satisfied in this width then this fragment is ignored.
unsigned MaxBytesToEmit;

/// Value to use for filling padding bytes.
int64_t Fill;

/// When emitting Nops some subtargets have specific nop encodings.
const MCSubtargetInfo *STI = nullptr;

public:
MCAlignFragment(Align Alignment, int64_t Fill, uint8_t FillLen,
unsigned MaxBytesToEmit)
: MCFragment(FT_Align, false), EmitNops(false), Alignment(Alignment),
FillLen(FillLen), MaxBytesToEmit(MaxBytesToEmit), Fill(Fill) {}

Align getAlignment() const { return Alignment; }
int64_t getFill() const { return Fill; }
uint8_t getFillLen() const { return FillLen; }
unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }

bool hasEmitNops() const { return EmitNops; }
void setEmitNops(bool Value, const MCSubtargetInfo *STI) {
EmitNops = Value;
this->STI = STI;
}

const MCSubtargetInfo *getSubtargetInfo() const { return STI; }

static bool classof(const MCFragment *F) {
return F->getKind() == MCFragment::FT_Align;
}
};

class MCFillFragment : public MCFragment {
uint8_t ValueSize;
/// Value to use for filling bytes.
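For context, alignment is now carried in a plain MCFragment of kind FT_Align instead of the deleted MCAlignFragment subclass. The sketch below shows what populating such a fragment looks like with the new accessors; the helper functions are illustrative only (they are not the actual MCObjectStreamer changes), and the streamer plumbing that obtains `F` is omitted.

#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSubtargetInfo.h"
using namespace llvm;

// Hypothetical helpers demonstrating the new in-fragment align API.
static void emitNopAlignSketch(MCFragment &F, Align Alignment,
                               const MCSubtargetInfo *STI) {
  // Code alignment: pad with nops. Fill is ignored in this mode; a FillLen
  // of 1 keeps the byte count exact.
  F.makeAlign(Alignment, /*Fill=*/0, /*FillLen=*/1,
              /*MaxBytesToEmit=*/Alignment.value());
  F.setAlignEmitNops(true, STI); // remember the STI used to pick nop encodings
}

static void emitValueAlignSketch(MCFragment &F, Align Alignment, int64_t Fill,
                                 uint8_t FillLen) {
  // Data alignment: pad with FillLen-byte copies of Fill, no nops.
  F.makeAlign(Alignment, Fill, FillLen, /*MaxBytesToEmit=*/Alignment.value());
}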
148 changes: 69 additions & 79 deletions llvm/lib/MC/MCAssembler.cpp
@@ -228,25 +228,26 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
return 4;

case MCFragment::FT_Align: {
const MCAlignFragment &AF = cast<MCAlignFragment>(F);
unsigned Offset = getFragmentOffset(AF);
unsigned Size = offsetToAlignment(Offset, AF.getAlignment());

// Insert extra Nops for code alignment if the target define
// shouldInsertExtraNopBytesForCodeAlign target hook.
if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
return Size;

// If we are padding with nops, force the padding to be larger than the
// minimum nop size.
if (Size > 0 && AF.hasEmitNops()) {
while (Size % getBackend().getMinimumNopSize())
Size += AF.getAlignment().value();
unsigned Offset = F.Offset + F.getFixedSize();
unsigned Size = offsetToAlignment(Offset, F.getAlignment());
auto &Frag = const_cast<MCFragment &>(F);
// When emitting nops, RISC-V-style linker relaxation might adjust the size
// and add a fixup, even if `Size` is initially 0.
bool AlignFixup = false;
if (F.hasAlignEmitNops()) {
AlignFixup = getBackend().relaxAlign(Frag, Size);
// If the backend does not handle the fragment specially, pad with nops,
// but ensure that the padding is a multiple of the minimum nop size.
if (!AlignFixup)
while (Size % getBackend().getMinimumNopSize())
Size += F.getAlignment().value();
}
if (Size > AF.getMaxBytesToEmit())
return 0;
return Size;
if (!AlignFixup && Size > F.getAlignMaxBytesToEmit())
Size = 0;
Frag.VarContentEnd = F.VarContentStart + Size;
if (Frag.VarContentEnd > Frag.getParent()->ContentStorage.size())
Frag.getParent()->ContentStorage.resize(Frag.VarContentEnd);
return F.getSize();
}

case MCFragment::FT_Org: {
@@ -433,48 +434,48 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
const auto &EF = cast<MCFragment>(F);
OS << StringRef(EF.getContents().data(), EF.getContents().size());
OS << StringRef(EF.getVarContents().data(), EF.getVarContents().size());
break;
}
} break;

case MCFragment::FT_Align: {
++stats::EmittedAlignFragments;
const MCAlignFragment &AF = cast<MCAlignFragment>(F);
assert(AF.getFillLen() && "Invalid virtual align in concrete fragment!");
OS << StringRef(F.getContents().data(), F.getContents().size());
assert(F.getAlignFillLen() &&
"Invalid virtual align in concrete fragment!");

uint64_t Count = FragmentSize / AF.getFillLen();
assert(FragmentSize % AF.getFillLen() == 0 &&
uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
"computeFragmentSize computed size is incorrect");

// See if we are aligning with nops, and if so do that first to try to fill
// the Count bytes. Then if that did not fill any bytes or there are any
// bytes left to fill use the Value and ValueSize to fill the rest.
// If we are aligning with nops, ask that target to emit the right data.
if (AF.hasEmitNops()) {
if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
report_fatal_error("unable to write nop sequence of " +
Twine(Count) + " bytes");
break;
}

// Otherwise, write out in multiples of the value size.
for (uint64_t i = 0; i != Count; ++i) {
switch (AF.getFillLen()) {
default: llvm_unreachable("Invalid size!");
case 1:
OS << char(AF.getFill());
break;
case 2:
support::endian::write<uint16_t>(OS, AF.getFill(), Endian);
break;
case 4:
support::endian::write<uint32_t>(OS, AF.getFill(), Endian);
break;
case 8:
support::endian::write<uint64_t>(OS, AF.getFill(), Endian);
break;
// If we are aligning with nops, ask the target to emit the right nop
// sequence; otherwise fill the Count bytes with the FillLen-sized Fill
// value.
if (F.hasAlignEmitNops()) {
if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
report_fatal_error("unable to write nop sequence of " + Twine(Count) +
" bytes");
} else {
// Otherwise, write out in multiples of the value size.
for (uint64_t i = 0; i != Count; ++i) {
switch (F.getAlignFillLen()) {
default:
llvm_unreachable("Invalid size!");
case 1:
OS << char(F.getAlignFill());
break;
case 2:
support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
break;
case 4:
support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
break;
case 8:
support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
break;
}
}
}
break;
}
} break;

case MCFragment::FT_Fill: {
++stats::EmittedFillFragments;
@@ -612,9 +613,7 @@ void MCAssembler::writeSectionData(raw_ostream &OS,
case MCFragment::FT_Align:
// Check that we aren't trying to write a non-zero value into a virtual
// section.
assert((cast<MCAlignFragment>(F).getFillLen() == 0 ||
cast<MCAlignFragment>(F).getFill() == 0) &&
"Invalid align in virtual section!");
assert(F.getAlignFill() == 0 && "Invalid align in virtual section!");
break;
case MCFragment::FT_Fill:
assert((cast<MCFillFragment>(F).getValue() == 0) &&
@@ -724,34 +723,25 @@ void MCAssembler::layout() {
for (MCSection &Sec : *this) {
for (MCFragment &F : Sec) {
// Process fragments with fixups here.
if (F.isEncoded()) {
auto Contents = F.getContents();
for (MCFixup &Fixup : F.getFixups()) {
auto Contents = F.getContents();
for (MCFixup &Fixup : F.getFixups()) {
uint64_t FixedValue;
MCValue Target;
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
}
if (F.getVarFixups().size()) {
// In the variable part, fixup offsets are relative to the fixed part's
// start. Extend the variable contents to the left to account for the
// fixed part size.
Contents = MutableArrayRef(F.getParent()->ContentStorage)
.slice(F.VarContentStart - Contents.size(), F.getSize());
for (MCFixup &Fixup : F.getVarFixups()) {
uint64_t FixedValue;
MCValue Target;
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
}
// In the variable part, fixup offsets are relative to the fixed part's
// start. Extend the variable contents to the left to account for the
// fixed part size.
auto VarFixups = F.getVarFixups();
if (VarFixups.size()) {
Contents =
MutableArrayRef(F.getParent()->ContentStorage)
.slice(F.VarContentStart - Contents.size(), F.getSize());
for (MCFixup &Fixup : VarFixups) {
uint64_t FixedValue;
MCValue Target;
evaluateFixup(F, Fixup, Target, FixedValue,
/*RecordReloc=*/true, Contents);
}
}
} else if (auto *AF = dyn_cast<MCAlignFragment>(&F)) {
// For RISC-V linker relaxation, an alignment relocation might be
// needed.
if (AF->hasEmitNops())
getBackend().shouldInsertFixupForCodeAlign(*this, *AF);
}
}
}
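As a worked example of the FT_Align size computation above (illustrative values only, assuming the backend's `relaxAlign` declines to handle the fragment): if the fragment's fixed content ends 6 bytes into the section and the requested alignment is 8 bytes, `offsetToAlignment` asks for 2 bytes of padding, which is kept because it does not exceed `MaxBytesToEmit`.

#include "llvm/Support/Alignment.h"
#include <cassert>
#include <cstdint>

int main() {
  // Offset of the end of the fragment's fixed part within the section.
  uint64_t Offset = 6;
  llvm::Align Alignment(8);
  // Bytes needed to reach the next 8-byte boundary: (8 - 6 % 8) % 8 == 2.
  uint64_t Size = llvm::offsetToAlignment(Offset, Alignment);
  assert(Size == 2);
  // .p2align-style limit: if the gap were larger than MaxBytesToEmit,
  // the fragment would emit nothing at all.
  uint64_t MaxBytesToEmit = 8;
  if (Size > MaxBytesToEmit)
    Size = 0;
  assert(Size == 2);
  return 0;
}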
9 changes: 3 additions & 6 deletions llvm/lib/MC/MCExpr.cpp
@@ -370,7 +370,6 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
}

int64_t Num;
unsigned Count;
if (DF) {
Displacement += DF->getContents().size();
} else if (F->getKind() == MCFragment::FT_Relaxable &&
@@ -379,11 +378,9 @@
// After layout, during relocation generation, it can be treated as a
// data fragment.
Displacement += F->getSize();
} else if (auto *AF = dyn_cast<MCAlignFragment>(F);
AF && Layout && AF->hasEmitNops() &&
!Asm->getBackend().shouldInsertExtraNopBytesForCodeAlign(
*AF, Count)) {
Displacement += Asm->computeFragmentSize(*AF);
} else if (F->getKind() == MCFragment::FT_Align && Layout &&
F->isLinkerRelaxable()) {
Displacement += Asm->computeFragmentSize(*F);
} else if (auto *FF = dyn_cast<MCFillFragment>(F);
FF && FF->getNumValues().evaluateAsAbsolute(Num)) {
Displacement += Num * FF->getValueSize();