diff --git a/llvm/include/llvm/MC/MCAsmBackend.h b/llvm/include/llvm/MC/MCAsmBackend.h
index 223896e8040f3..c69fcec586bdf 100644
--- a/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/llvm/include/llvm/MC/MCAsmBackend.h
@@ -18,9 +18,7 @@ namespace llvm {
 
-class MCAlignFragment;
 class MCFragment;
-class MCLEBFragment;
 class MCSymbol;
 class MCAssembler;
 class MCContext;
@@ -111,15 +109,14 @@ class LLVM_ABI MCAsmBackend {
   /// Hook to check if extra nop bytes must be inserted for alignment directive.
   /// For some targets this may be necessary in order to support linker
   /// relaxation. The number of bytes to insert are returned in Size.
-  virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+  virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
                                                      unsigned &Size) {
     return false;
   }
 
   /// Hook which indicates if the target requires a fixup to be generated when
   /// handling an align directive in an executable section
-  virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                             MCAlignFragment &AF) {
+  virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) {
     return false;
   }
diff --git a/llvm/include/llvm/MC/MCObjectStreamer.h b/llvm/include/llvm/MC/MCObjectStreamer.h
index deaabddab3cc7..633334ee8a33f 100644
--- a/llvm/include/llvm/MC/MCObjectStreamer.h
+++ b/llvm/include/llvm/MC/MCObjectStreamer.h
@@ -77,9 +77,6 @@ class MCObjectStreamer : public MCStreamer {
 
 protected:
   bool changeSectionImpl(MCSection *Section, uint32_t Subsection);
-  MCAlignFragment *createAlignFragment(Align Alignment, int64_t Fill,
-                                       uint8_t FillLen,
-                                       unsigned MaxBytesToEmit);
 
 public:
   void visitUsedSymbol(const MCSymbol &Sym) override;
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 66ea8f8620940..fe8c13d108c2c 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -254,6 +254,19 @@ class MCFragment {
       uint32_t OperandStart;
       uint32_t OperandSize;
     } relax;
+    struct {
+      // The alignment to ensure, in bytes.
+      Align Alignment;
+      // The size of the integer (in bytes) of \p Value.
+      uint8_t FillLen;
+      // If true, fill with target-specific nop instructions.
+      bool EmitNops;
+      // The maximum number of bytes to emit; if the alignment
+      // cannot be satisfied in this width then this fragment is ignored.
+      unsigned MaxBytesToEmit;
+      // Value to use for filling padding bytes.
+      int64_t Fill;
+    } align;
     struct {
       // True if this is a sleb128, false if uleb128.
       bool IsSigned;
@@ -283,6 +296,7 @@ class MCFragment {
       return false;
     case MCFragment::FT_Relaxable:
     case MCFragment::FT_Data:
+    case MCFragment::FT_Align:
     case MCFragment::FT_Dwarf:
     case MCFragment::FT_DwarfFrame:
     case MCFragment::FT_LEB:
@@ -440,6 +454,38 @@ class MCFragment {
     llvm::copy(Inst, S.begin() + u.relax.OperandStart);
   }
 
+  //== FT_Align functions
+  void makeAlign(Align Alignment, int64_t Fill, uint8_t FillLen,
+                 unsigned MaxBytesToEmit) {
+    Kind = FT_Align;
+    u.align.EmitNops = false;
+    u.align.Alignment = Alignment;
+    u.align.Fill = Fill;
+    u.align.FillLen = FillLen;
+    u.align.MaxBytesToEmit = MaxBytesToEmit;
+  }
+
+  Align getAlignment() const {
+    assert(Kind == FT_Align);
+    return u.align.Alignment;
+  }
+  int64_t getAlignFill() const {
+    assert(Kind == FT_Align);
+    return u.align.Fill;
+  }
+  uint8_t getAlignFillLen() const {
+    assert(Kind == FT_Align);
+    return u.align.FillLen;
+  }
+  unsigned getAlignMaxBytesToEmit() const {
+    assert(Kind == FT_Align);
+    return u.align.MaxBytesToEmit;
+  }
+  bool hasAlignEmitNops() const {
+    assert(Kind == FT_Align);
+    return u.align.EmitNops;
+  }
+
   //== FT_LEB functions
   void makeLEB(bool IsSigned, const MCExpr *Value) {
     assert(Kind == FT_Data);
@@ -487,52 +533,6 @@ class MCEncodedFragment : public MCFragment {
       : MCFragment(FType, HasInstructions) {}
 };
 
-class MCAlignFragment : public MCFragment {
-  /// Flag to indicate that (optimal) NOPs should be emitted instead
-  /// of using the provided value. The exact interpretation of this flag is
-  /// target dependent.
-  bool EmitNops : 1;
-
-  /// The alignment to ensure, in bytes.
-  Align Alignment;
-
-  /// The size of the integer (in bytes) of \p Value.
-  uint8_t FillLen;
-
-  /// The maximum number of bytes to emit; if the alignment
-  /// cannot be satisfied in this width then this fragment is ignored.
-  unsigned MaxBytesToEmit;
-
-  /// Value to use for filling padding bytes.
-  int64_t Fill;
-
-  /// When emitting Nops some subtargets have specific nop encodings.
-  const MCSubtargetInfo *STI = nullptr;
-
-public:
-  MCAlignFragment(Align Alignment, int64_t Fill, uint8_t FillLen,
-                  unsigned MaxBytesToEmit)
-      : MCFragment(FT_Align, false), EmitNops(false), Alignment(Alignment),
-        FillLen(FillLen), MaxBytesToEmit(MaxBytesToEmit), Fill(Fill) {}
-
-  Align getAlignment() const { return Alignment; }
-  int64_t getFill() const { return Fill; }
-  uint8_t getFillLen() const { return FillLen; }
-  unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }
-
-  bool hasEmitNops() const { return EmitNops; }
-  void setEmitNops(bool Value, const MCSubtargetInfo *STI) {
-    EmitNops = Value;
-    this->STI = STI;
-  }
-
-  const MCSubtargetInfo *getSubtargetInfo() const { return STI; }
-
-  static bool classof(const MCFragment *F) {
-    return F->getKind() == MCFragment::FT_Align;
-  }
-};
-
 class MCFillFragment : public MCFragment {
   uint8_t ValueSize;
   /// Value to use for filling bytes.
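[Editorial note, not part of the patch: with this header change there is no separate fragment class to allocate; a caller turns the current fragment into an FT_Align fragment in place and reads the parameters back through the new accessors. Below is a minimal sketch of that pattern against the API above, assuming an LLVM build with these headers; the function name describeAlign and the 0x90/16-byte parameters are illustrative only.]

// Sketch only: describeAlign is a hypothetical helper, not part of the patch.
#include "llvm/MC/MCSection.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Convert an (empty) fragment into an FT_Align fragment and read the
// parameters back through the new accessors.
static void describeAlign(MCFragment &F) {
  // Request 16-byte alignment, fill one byte at a time with 0x90, and give up
  // if more than 15 bytes of padding would be needed.
  F.makeAlign(Align(16), /*Fill=*/0x90, /*FillLen=*/1, /*MaxBytesToEmit=*/15);

  if (F.getKind() == MCFragment::FT_Align) {
    outs() << "align to " << F.getAlignment().value() << " bytes, fill 0x";
    outs().write_hex(F.getAlignFill());
    outs() << " x" << unsigned(F.getAlignFillLen()) << ", max "
           << F.getAlignMaxBytesToEmit()
           << (F.hasAlignEmitNops() ? ", nops\n" : "\n");
  }
}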
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index 48b222e7ba5a9..3ab402a8e2832 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -227,25 +227,24 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
     return 4;
 
   case MCFragment::FT_Align: {
-    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
-    unsigned Offset = getFragmentOffset(AF);
-    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
+    unsigned Offset = F.Offset + F.getFixedSize();
+    unsigned Size = offsetToAlignment(Offset, F.getAlignment());
 
     // Insert extra Nops for code alignment if the target define
     // shouldInsertExtraNopBytesForCodeAlign target hook.
-    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
-        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
-      return Size;
+    if (F.getParent()->useCodeAlign() && F.hasAlignEmitNops() &&
+        getBackend().shouldInsertExtraNopBytesForCodeAlign(F, Size))
+      return F.getFixedSize() + Size;
 
     // If we are padding with nops, force the padding to be larger than the
     // minimum nop size.
-    if (Size > 0 && AF.hasEmitNops()) {
+    if (Size > 0 && F.hasAlignEmitNops()) {
       while (Size % getBackend().getMinimumNopSize())
-        Size += AF.getAlignment().value();
+        Size += F.getAlignment().value();
     }
 
-    if (Size > AF.getMaxBytesToEmit())
-      return 0;
-    return Size;
+    if (Size > F.getAlignMaxBytesToEmit())
+      Size = 0;
+    return F.getFixedSize() + Size;
   }
 
   case MCFragment::FT_Org: {
@@ -419,6 +418,7 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
   switch (F.getKind()) {
   case MCFragment::FT_Data:
   case MCFragment::FT_Relaxable:
+  case MCFragment::FT_Align:
   case MCFragment::FT_LEB:
   case MCFragment::FT_Dwarf:
   case MCFragment::FT_DwarfFrame:
@@ -431,48 +431,46 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
     const auto &EF = cast<MCEncodedFragment>(F);
     OS << StringRef(EF.getContents().data(), EF.getContents().size());
     OS << StringRef(EF.getVarContents().data(), EF.getVarContents().size());
-    break;
-  }
-  case MCFragment::FT_Align: {
-    ++stats::EmittedAlignFragments;
-    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
-    assert(AF.getFillLen() && "Invalid virtual align in concrete fragment!");
-
-    uint64_t Count = FragmentSize / AF.getFillLen();
-    assert(FragmentSize % AF.getFillLen() == 0 &&
-           "computeFragmentSize computed size is incorrect");
-
-    // See if we are aligning with nops, and if so do that first to try to fill
-    // the Count bytes. Then if that did not fill any bytes or there are any
-    // bytes left to fill use the Value and ValueSize to fill the rest.
-    // If we are aligning with nops, ask that target to emit the right data.
-    if (AF.hasEmitNops()) {
-      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
-        report_fatal_error("unable to write nop sequence of " +
-                           Twine(Count) + " bytes");
-      break;
-    }
-
-    // Otherwise, write out in multiples of the value size.
-    for (uint64_t i = 0; i != Count; ++i) {
-      switch (AF.getFillLen()) {
-      default: llvm_unreachable("Invalid size!");
-      case 1:
-        OS << char(AF.getFill());
-        break;
-      case 2:
-        support::endian::write<uint16_t>(OS, AF.getFill(), Endian);
-        break;
-      case 4:
-        support::endian::write<uint32_t>(OS, AF.getFill(), Endian);
-        break;
-      case 8:
-        support::endian::write<uint64_t>(OS, AF.getFill(), Endian);
-        break;
+    if (F.getKind() == MCFragment::FT_Align) {
+      ++stats::EmittedAlignFragments;
+      assert(F.getAlignFillLen() &&
+             "Invalid virtual align in concrete fragment!");
+
+      uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
+      assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
+             "computeFragmentSize computed size is incorrect");
+
+      // See if we are aligning with nops, and if so do that first to try to
+      // fill the Count bytes. Then if that did not fill any bytes or there are
+      // any bytes left to fill use the Value and ValueSize to fill the rest. If
+      // we are aligning with nops, ask that target to emit the right data.
+      if (F.hasAlignEmitNops()) {
+        if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
+          report_fatal_error("unable to write nop sequence of " + Twine(Count) +
+                             " bytes");
+      } else {
+        // Otherwise, write out in multiples of the value size.
+        for (uint64_t i = 0; i != Count; ++i) {
+          switch (F.getAlignFillLen()) {
+          default:
+            llvm_unreachable("Invalid size!");
+          case 1:
+            OS << char(F.getAlignFill());
+            break;
+          case 2:
+            support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
+            break;
+          case 4:
+            support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
+            break;
+          case 8:
+            support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
+            break;
+          }
+        }
       }
     }
-    break;
-  }
+  } break;
 
   case MCFragment::FT_Fill: {
     ++stats::EmittedFillFragments;
@@ -703,34 +701,30 @@ void MCAssembler::layout() {
   for (MCSection &Sec : *this) {
     for (MCFragment &F : Sec) {
       // Process fragments with fixups here.
-      if (F.isEncoded()) {
-        auto Contents = F.getContents();
-        for (MCFixup &Fixup : F.getFixups()) {
+      auto Contents = F.getContents();
+      for (MCFixup &Fixup : F.getFixups()) {
+        uint64_t FixedValue;
+        MCValue Target;
+        evaluateFixup(F, Fixup, Target, FixedValue,
+                      /*RecordReloc=*/true, Contents);
+      }
+      if (F.getVarFixups().size()) {
+        // In the variable part, fixup offsets are relative to the fixed part's
+        // start. Extend the variable contents to the left to account for the
+        // fixed part size.
+        Contents = MutableArrayRef(F.getParent()->ContentStorage)
+                       .slice(F.VarContentStart - Contents.size(), F.getSize());
+        for (MCFixup &Fixup : F.getVarFixups()) {
           uint64_t FixedValue;
           MCValue Target;
          evaluateFixup(F, Fixup, Target, FixedValue,
                        /*RecordReloc=*/true, Contents);
        }
-        // In the variable part, fixup offsets are relative to the fixed part's
-        // start. Extend the variable contents to the left to account for the
-        // fixed part size.
-        auto VarFixups = F.getVarFixups();
-        if (VarFixups.size()) {
-          Contents =
-              MutableArrayRef(F.getParent()->ContentStorage)
-                  .slice(F.VarContentStart - Contents.size(), F.getSize());
-          for (MCFixup &Fixup : VarFixups) {
-            uint64_t FixedValue;
-            MCValue Target;
-            evaluateFixup(F, Fixup, Target, FixedValue,
-                          /*RecordReloc=*/true, Contents);
-          }
-        }
-      } else if (auto *AF = dyn_cast<MCAlignFragment>(&F)) {
+      } else if (F.getKind() == MCFragment::FT_Align) {
        // For RISC-V linker relaxation, an alignment relocation might be
        // needed.
-        if (AF->hasEmitNops())
-          getBackend().shouldInsertFixupForCodeAlign(*this, *AF);
+        if (F.hasAlignEmitNops())
+          getBackend().shouldInsertFixupForCodeAlign(*this, F);
       }
     }
   }
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 22dff497911de..f0f1bd485258f 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -379,11 +379,11 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
       // After layout, during relocation generation, it can be treated as a
       // data fragment.
       Displacement += F->getSize();
-    } else if (auto *AF = dyn_cast<MCAlignFragment>(F);
-               AF && Layout && AF->hasEmitNops() &&
+    } else if (F->getKind() == MCFragment::FT_Align && Layout &&
+               F->hasAlignEmitNops() &&
                !Asm->getBackend().shouldInsertExtraNopBytesForCodeAlign(
-                   *AF, Count)) {
-      Displacement += Asm->computeFragmentSize(*AF);
+                   *F, Count)) {
+      Displacement += Asm->computeFragmentSize(*F);
     } else if (auto *FF = dyn_cast<MCFillFragment>(F);
                FF && FF->getNumValues().evaluateAsAbsolute(Num)) {
       Displacement += Num * FF->getValueSize();
diff --git a/llvm/lib/MC/MCFragment.cpp b/llvm/lib/MC/MCFragment.cpp
index fe7afd4b41378..569f2a5652869 100644
--- a/llvm/lib/MC/MCFragment.cpp
+++ b/llvm/lib/MC/MCFragment.cpp
@@ -72,17 +72,9 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
   };
 
   switch (getKind()) {
-  case MCFragment::FT_Align: {
-    const auto *AF = cast<MCAlignFragment>(this);
-    OS << " Align:" << AF->getAlignment().value() << " Fill:" << AF->getFill()
-       << " FillLen:" << unsigned(AF->getFillLen())
-       << " MaxBytesToEmit:" << AF->getMaxBytesToEmit();
-    if (AF->hasEmitNops())
-      OS << " Nops";
-    break;
-  }
   case MCFragment::FT_Data:
   case MCFragment::FT_Relaxable:
+  case MCFragment::FT_Align:
   case MCFragment::FT_LEB:
   case MCFragment::FT_Dwarf:
   case MCFragment::FT_DwarfFrame: {
@@ -111,6 +103,13 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
     OS << ' ';
     getInst().dump_pretty(OS);
     break;
+  case MCFragment::FT_Align:
+    OS << "\n Align:" << getAlignment().value() << " Fill:" << getAlignFill()
+       << " FillLen:" << unsigned(getAlignFillLen())
+       << " MaxBytesToEmit:" << getAlignMaxBytesToEmit();
+    if (hasAlignEmitNops())
+      OS << " Nops";
+    break;
   case MCFragment::FT_LEB: {
     OS << " Value:";
     getLEBValue().print(OS, nullptr);
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index 739e5fec8aa5d..af47df2967bb3 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -515,19 +515,15 @@ void MCObjectStreamer::emitBytes(StringRef Data) {
   DF->appendContents(ArrayRef(Data.data(), Data.size()));
 }
 
-MCAlignFragment *MCObjectStreamer::createAlignFragment(
-    Align Alignment, int64_t Fill, uint8_t FillLen, unsigned MaxBytesToEmit) {
-  if (MaxBytesToEmit == 0)
-    MaxBytesToEmit = Alignment.value();
-  return getContext().allocFragment<MCAlignFragment>(Alignment, Fill, FillLen,
-                                                     MaxBytesToEmit);
-}
-
 void MCObjectStreamer::emitValueToAlignment(Align Alignment, int64_t Fill,
                                             uint8_t FillLen,
                                             unsigned MaxBytesToEmit) {
-  auto *F = createAlignFragment(Alignment, Fill, FillLen, MaxBytesToEmit);
-  insert(F);
+  if (MaxBytesToEmit == 0)
+    MaxBytesToEmit = Alignment.value();
+  MCFragment *F = getCurrentFragment();
+  F->makeAlign(Alignment, Fill, FillLen, MaxBytesToEmit);
+  newFragment();
+
   // Update the maximum alignment on the current section if necessary.
   F->getParent()->ensureMinAlignment(Alignment);
 }
 
@@ -535,11 +531,10 @@ void MCObjectStreamer::emitValueToAlignment(Align Alignment, int64_t Fill,
 void MCObjectStreamer::emitCodeAlignment(Align Alignment,
                                          const MCSubtargetInfo *STI,
                                          unsigned MaxBytesToEmit) {
-  auto *F = createAlignFragment(Alignment, 0, 1, MaxBytesToEmit);
-  F->setEmitNops(true, STI);
-  insert(F);
-  // Update the maximum alignment on the current section if necessary.
-  F->getParent()->ensureMinAlignment(Alignment);
+  auto *F = getCurrentFragment();
+  emitValueToAlignment(Alignment, 0, 1, MaxBytesToEmit);
+  F->u.align.EmitNops = true;
+  F->STI = STI;
 
   // With RISC-V style linker relaxation, mark the section as linker-relaxable
   // if the alignment is larger than the minimum NOP size.
diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp
index da6dbf3028f26..3b99af47eb45b 100644
--- a/llvm/lib/MC/WasmObjectWriter.cpp
+++ b/llvm/lib/MC/WasmObjectWriter.cpp
@@ -696,14 +696,15 @@ static void addData(SmallVectorImpl<char> &DataBytes,
     if (Frag.hasInstructions())
       report_fatal_error("only data supported in data sections");
 
-    if (auto *Align = dyn_cast<MCAlignFragment>(&Frag)) {
-      if (Align->getFillLen() != 1)
+    llvm::append_range(DataBytes, Frag.getContents());
+    if (Frag.getKind() == MCFragment::FT_Align) {
+      if (Frag.getAlignFillLen() != 1)
         report_fatal_error("only byte values supported for alignment");
       // If nops are requested, use zeros, as this is the data section.
-      uint8_t Value = Align->hasEmitNops() ? 0 : Align->getFill();
+      uint8_t Value = Frag.hasAlignEmitNops() ? 0 : Frag.getAlignFill();
       uint64_t Size =
-          std::min(alignTo(DataBytes.size(), Align->getAlignment()),
-                   DataBytes.size() + Align->getMaxBytesToEmit());
+          std::min(alignTo(DataBytes.size(), Frag.getAlignment()),
+                   DataBytes.size() + Frag.getAlignMaxBytesToEmit());
       DataBytes.resize(Size, Value);
     } else if (auto *Fill = dyn_cast<MCFillFragment>(&Frag)) {
       int64_t NumValues;
@@ -711,12 +712,10 @@ static void addData(SmallVectorImpl<char> &DataBytes,
         llvm_unreachable("The fill should be an assembler constant");
       DataBytes.insert(DataBytes.end(), Fill->getValueSize() * NumValues,
                        Fill->getValue());
+    } else if (Frag.getKind() == MCFragment::FT_LEB) {
+      llvm::append_range(DataBytes, Frag.getVarContents());
     } else {
-      llvm::append_range(DataBytes, Frag.getContents());
-      if (Frag.getKind() == MCFragment::FT_LEB)
-        llvm::append_range(DataBytes, Frag.getVarContents());
-      else
-        assert(Frag.getKind() == MCFragment::FT_Data);
+      assert(Frag.getKind() == MCFragment::FT_Data);
     }
   }
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index 7b9f1156f9102..032bfea71140f 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -182,14 +182,14 @@ void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
 // could satisfy alignment by removing Nops.
 // The function returns the total Nops Size we need to insert.
 bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
-    const MCAlignFragment &AF, unsigned &Size) {
+    const MCFragment &AF, unsigned &Size) {
   // Calculate Nops Size only when linker relaxation enabled.
   if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
     return false;
 
   // Ignore alignment if MaxBytesToEmit is less than the minimum Nop size.
   const unsigned MinNopLen = 4;
-  if (AF.getMaxBytesToEmit() < MinNopLen)
+  if (AF.getAlignMaxBytesToEmit() < MinNopLen)
     return false;
   Size = AF.getAlignment().value() - MinNopLen;
   return AF.getAlignment() > MinNopLen;
@@ -205,7 +205,7 @@ bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
 // maximum number of bytes to emit. The maximum number of bytes is zero
 // means ignore the emit limit.
 bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                                        MCAlignFragment &AF) {
+                                                        MCFragment &AF) {
   // Insert the fixup only when linker relaxation enabled.
   if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
     return false;
@@ -219,8 +219,8 @@ bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
   MCSection *Sec = AF.getParent();
   MCContext &Ctx = getContext();
   const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
-  MCFixup Fixup = MCFixup::create(0, Dummy, ELF::R_LARCH_ALIGN);
-  unsigned MaxBytesToEmit = AF.getMaxBytesToEmit();
+  MCFixup Fixup = MCFixup::create(AF.getFixedSize(), Dummy, ELF::R_LARCH_ALIGN);
+  unsigned MaxBytesToEmit = AF.getAlignMaxBytesToEmit();
 
   auto createExtendedValue = [&]() {
     const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
@@ -434,7 +434,7 @@ bool LoongArchAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
 
   // Otherwise, check if the offset between the symbol and fragment is fully
   // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
-  // offset-affected MCAlignFragment). Complements the generic
+  // offset-affected FT_Align fragments). Complements the generic
   // isSymbolRefDifferenceFullyResolvedImpl.
   if (!PCRelTemp)
     PCRelTemp = getContext().createTempSymbol();
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
index b32ba067810ce..793e4093b1c9e 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
@@ -46,12 +46,11 @@ class LoongArchAsmBackend : public MCAsmBackend {
                   bool IsResolved) override;
 
   // Return Size with extra Nop Bytes for alignment directive in code section.
-  bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+  bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
                                              unsigned &Size) override;
 
   // Insert target specific fixup type for alignment directive in code section.
-  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                     MCAlignFragment &AF) override;
+  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) override;
 
   bool shouldForceRelocation(const MCFixup &Fixup, const MCValue &Target);
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index f76f8b3060d2a..6bc313656f7c1 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -637,7 +637,7 @@ bool RISCVAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
 
   // Otherwise, check if the offset between the symbol and fragment is fully
   // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
-  // offset-affected MCAlignFragment). Complements the generic
+  // offset-affected FT_Align fragments). Complements the generic
   // isSymbolRefDifferenceFullyResolvedImpl.
   if (!PCRelTemp)
     PCRelTemp = getContext().createTempSymbol();
@@ -892,7 +892,7 @@ void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
 // could satisfy alignment by removing Nops.
 // The function return the total Nops Size we need to insert.
 bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
-    const MCAlignFragment &AF, unsigned &Size) {
+    const MCFragment &AF, unsigned &Size) {
   // Calculate Nops Size only when linker relaxation enabled.
   const MCSubtargetInfo *STI = AF.getSubtargetInfo();
   if (!STI->hasFeature(RISCV::FeatureRelax))
@@ -914,7 +914,7 @@ bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
 // The function insert fixup_riscv_align fixup which eventually will
 // transfer to R_RISCV_ALIGN relocation type.
 bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                                    MCAlignFragment &AF) {
+                                                    MCFragment &AF) {
   // Insert the fixup only when linker relaxation enabled.
   const MCSubtargetInfo *STI = AF.getSubtargetInfo();
   if (!STI->hasFeature(RISCV::FeatureRelax))
@@ -928,7 +928,7 @@ bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
   MCContext &Ctx = getContext();
   const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
-  MCFixup Fixup = MCFixup::create(0, Dummy, ELF::R_RISCV_ALIGN);
+  MCFixup Fixup = MCFixup::create(AF.getFixedSize(), Dummy, ELF::R_RISCV_ALIGN);
 
   uint64_t FixedValue = 0;
   MCValue NopBytes = MCValue::get(Count);
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
index 8c10fbec3c8fc..c4a1c74aa2c54 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
@@ -39,12 +39,11 @@ class RISCVAsmBackend : public MCAsmBackend {
   ~RISCVAsmBackend() override = default;
 
   // Return Size with extra Nop Bytes for alignment directive in code section.
-  bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+  bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
                                              unsigned &Size) override;
 
   // Insert target specific fixup type for alignment directive in code section.
-  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                     MCAlignFragment &AF) override;
+  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) override;
 
   std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
                                     uint64_t &) override;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index ad75ccd80b63d..e213923ccf38e 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -926,13 +926,11 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
       continue;
     }
 
-    const uint64_t OrigSize = Asm.computeFragmentSize(F);
-
     // To keep the effects local, prefer to relax instructions closest to
     // the align directive. This is purely about human understandability
     // of the resulting code. If we later find a reason to expand
     // particular instructions over others, we can adjust.
-    unsigned RemainingSize = OrigSize;
+    unsigned RemainingSize = Asm.computeFragmentSize(F) - F.getFixedSize();
     while (!Relaxable.empty() && RemainingSize != 0) {
       auto &RF = *Relaxable.pop_back_val();
       // Give the backend a chance to play any tricks it wishes to increase
diff --git a/llvm/test/MC/ELF/mc-dump.s b/llvm/test/MC/ELF/mc-dump.s
index 5cc2e9fa50179..fd6cf95f4af44 100644
--- a/llvm/test/MC/ELF/mc-dump.s
+++ b/llvm/test/MC/ELF/mc-dump.s
@@ -6,9 +6,9 @@
 #CHECK-LABEL:assembler backend - final-layout
 # CHECK:Sections:[
 # CHECK-NEXT:MCSection Name:.text
-# CHECK-NEXT:0 Data Size:0 []
+# CHECK-NEXT:0 Align Size:0+0 []
+# CHECK-NEXT: Align:4 Fill:0 FillLen:1 MaxBytesToEmit:4 Nops
 # CHECK-NEXT: Symbol @0 .text
-# CHECK-NEXT:0 Align Align:4 Fill:0 FillLen:1 MaxBytesToEmit:4 Nops
 # CHECK-NEXT:0 Data Size:0 []
 # CHECK-NEXT: Symbol @0 _start
 # CHECK-NEXT: Symbol @0 Temporary
@@ -22,9 +22,9 @@
 # CHECK-NEXT: Symbol @0 Temporary
 # CHECK-NEXT: Symbol @16 Temporary
 # CHECK-NEXT:MCSection Name:.data
-# CHECK-NEXT:0 Data Size:0 []
+# CHECK-NEXT:0 Align Size:0+0 []
+# CHECK-NEXT: Align:4 Fill:0 FillLen:1 MaxBytesToEmit:4
 # CHECK-NEXT: Symbol @0 .data
-# CHECK-NEXT:0 Align Align:4 Fill:0 FillLen:1 MaxBytesToEmit:4
 # CHECK-NEXT:0 Data Size:4 [01,00,00,00]
 # CHECK-NEXT:4 Fill Value:0 ValueSize:1 NumValues:1
 # CHECK-NEXT:5 LEB Size:0+1 [15] Value:.Ltmp0-_start Signed:0
diff --git a/llvm/test/MC/RISCV/Relocations/mc-dump.s b/llvm/test/MC/RISCV/Relocations/mc-dump.s
index f72258498169f..842851ce04843 100644
--- a/llvm/test/MC/RISCV/Relocations/mc-dump.s
+++ b/llvm/test/MC/RISCV/Relocations/mc-dump.s
@@ -3,16 +3,16 @@
 
 # CHECK:Sections:[
 # CHECK-NEXT:MCSection Name:.text
-# CHECK-NEXT:0 Data Size:0 []
+# CHECK-NEXT:0 Align Size:0+0 []
+# CHECK-NEXT: Align:4 Fill:0 FillLen:1 MaxBytesToEmit:4 Nops
 # CHECK-NEXT: Symbol @0 .text
-# CHECK-NEXT:0 Align Align:4 Fill:0 FillLen:1 MaxBytesToEmit:4 Nops
 # CHECK-NEXT:0 Data LinkerRelaxable Size:8 [97,00,00,00,e7,80,00,00]
 # CHECK-NEXT: Fixup @0 Value:specifier(19,ext) Kind:4023
 # CHECK-NEXT: Symbol @0 $x
-# CHECK-NEXT:8 Data Size:0 []
-# CHECK-NEXT:8 Align Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
-# CHECK-NEXT:12 Data Size:4 [13,05,30,00]
-# CHECK-NEXT:16 Align Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
+# CHECK-NEXT:8 Align Size:0+0 []
+# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
+# CHECK-NEXT:12 Align Size:4+0 [13,05,30,00]
+# CHECK-NEXT: Align:8 Fill:0 FillLen:1 MaxBytesToEmit:8 Nops
 # CHECK-NEXT:]
 
 call ext
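[Editorial note, not part of the patch: the key semantic shift visible in the hunks and tests above is that an FT_Align fragment now carries fixed content of its own, so its total size is that fixed size plus whatever padding is needed after the content, and the padding is dropped entirely when it would exceed MaxBytesToEmit. Below is a standalone sketch of that arithmetic under those assumptions, ignoring the nop-specific branches; alignFragmentSize, main, and the sample numbers are illustrative only, the real logic is MCAssembler::computeFragmentSize.]

// Standalone sketch of the FT_Align size rule after this patch: padding is
// measured after the fragment's fixed content, and the fragment's total size
// is FixedSize + padding (or FixedSize alone if MaxBytesToEmit is exceeded).
#include <cstdint>
#include <iostream>

static uint64_t alignFragmentSize(uint64_t Offset, uint64_t FixedSize,
                                  uint64_t Alignment, uint64_t MaxBytesToEmit) {
  uint64_t End = Offset + FixedSize;                // where padding starts
  uint64_t Padding = (Alignment - End % Alignment) % Alignment;
  if (Padding > MaxBytesToEmit)                     // too costly: no padding
    Padding = 0;
  return FixedSize + Padding;
}

int main() {
  // A fragment at offset 2 with 4 bytes of fixed content, aligning to 8:
  // the content ends at 6, so 2 padding bytes are needed -> total size 6.
  std::cout << alignFragmentSize(2, 4, 8, 8) << "\n"; // prints 6
  // If at most 1 byte of padding may be emitted, the alignment is skipped.
  std::cout << alignFragmentSize(2, 4, 8, 1) << "\n"; // prints 4
  return 0;
}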