From eb8dc7802510cac41308158748610320be40c2ad Mon Sep 17 00:00:00 2001
From: Fangrui Song
Date: Wed, 16 Jul 2025 00:20:28 -0700
Subject: [PATCH] =?UTF-8?q?[=F0=9D=98=80=F0=9D=97=BD=F0=9D=97=BF]=20initia?=
 =?UTF-8?q?l=20version?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Created using spr 1.3.5-bogner
---
 llvm/include/llvm/MC/MCAsmBackend.h           |   7 +-
 llvm/include/llvm/MC/MCSection.h              |  96 +++++++------
 llvm/lib/MC/MCAssembler.cpp                   | 136 +++++++++---
 llvm/lib/MC/MCExpr.cpp                        |   8 +-
 llvm/lib/MC/MCFragment.cpp                    |  17 ++-
 llvm/lib/MC/MCObjectStreamer.cpp              |   8 +-
 llvm/lib/MC/WasmObjectWriter.cpp              |  22 ++-
 .../MCTargetDesc/LoongArchAsmBackend.cpp      |  10 +-
 .../MCTargetDesc/LoongArchAsmBackend.h        |   5 +-
 .../RISCV/MCTargetDesc/RISCVAsmBackend.cpp    |   6 +-
 .../RISCV/MCTargetDesc/RISCVAsmBackend.h      |   5 +-
 .../Target/X86/MCTargetDesc/X86AsmBackend.cpp |   4 +-
 12 files changed, 157 insertions(+), 167 deletions(-)

diff --git a/llvm/include/llvm/MC/MCAsmBackend.h b/llvm/include/llvm/MC/MCAsmBackend.h
index 0322cbe6cbe8d..93259b0ea6d74 100644
--- a/llvm/include/llvm/MC/MCAsmBackend.h
+++ b/llvm/include/llvm/MC/MCAsmBackend.h
@@ -18,9 +18,7 @@
 
 namespace llvm {
 
-class MCAlignFragment;
 class MCFragment;
-class MCLEBFragment;
 class MCSymbol;
 class MCAssembler;
 class MCContext;
@@ -108,15 +106,14 @@ class LLVM_ABI MCAsmBackend {
   /// Hook to check if extra nop bytes must be inserted for alignment directive.
   /// For some targets this may be necessary in order to support linker
   /// relaxation. The number of bytes to insert are returned in Size.
-  virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+  virtual bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
                                                      unsigned &Size) {
     return false;
   }
 
   /// Hook which indicates if the target requires a fixup to be generated when
   /// handling an align directive in an executable section
-  virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                             MCAlignFragment &AF) {
+  virtual bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) {
     return false;
   }
 
diff --git a/llvm/include/llvm/MC/MCSection.h b/llvm/include/llvm/MC/MCSection.h
index 296fdd8af0d14..5d8fa5680c27d 100644
--- a/llvm/include/llvm/MC/MCSection.h
+++ b/llvm/include/llvm/MC/MCSection.h
@@ -254,6 +254,19 @@ class MCFragment {
       uint32_t OperandStart;
       uint32_t OperandSize;
     } relax;
+    struct {
+      // The alignment to ensure, in bytes.
+      Align Alignment;
+      // The size of the integer (in bytes) of \p Value.
+      uint8_t FillLen;
+      // If true, fill with target-specific nop instructions.
+      bool EmitNops;
+      // The maximum number of bytes to emit; if the alignment
+      // cannot be satisfied in this width then this fragment is ignored.
+      unsigned MaxBytesToEmit;
+      // Value to use for filling padding bytes.
+      int64_t Fill;
+    } align;
     struct {
       // True if this is a sleb128, false if uleb128.
      bool IsSigned;
@@ -441,6 +454,43 @@ class MCFragment {
     llvm::copy(Inst, S.begin() + u.relax.OperandStart);
   }
 
+  //== FT_Align functions
+  void makeAlign(Align Alignment, int64_t Fill, uint8_t FillLen,
+                 unsigned MaxBytesToEmit) {
+    Kind = FT_Align;
+    u.align.EmitNops = false;
+    u.align.Alignment = Alignment;
+    u.align.Fill = Fill;
+    u.align.FillLen = FillLen;
+    u.align.MaxBytesToEmit = MaxBytesToEmit;
+  }
+
+  Align getAlignment() const {
+    assert(Kind == FT_Align);
+    return u.align.Alignment;
+  }
+  int64_t getAlignFill() const {
+    assert(Kind == FT_Align);
+    return u.align.Fill;
+  }
+  uint8_t getAlignFillLen() const {
+    assert(Kind == FT_Align);
+    return u.align.FillLen;
+  }
+  unsigned getAlignMaxBytesToEmit() const {
+    assert(Kind == FT_Align);
+    return u.align.MaxBytesToEmit;
+  }
+  bool hasAlignEmitNops() const {
+    assert(Kind == FT_Align);
+    return u.align.EmitNops;
+  }
+  void setAlignEmitNops(bool Value, const MCSubtargetInfo *STI) {
+    assert(Kind == FT_Align);
+    u.align.EmitNops = Value;
+    this->STI = STI;
+  }
+
   //== FT_LEB functions
   const MCExpr &getLEBValue() const {
     assert(Kind == FT_LEB);
@@ -486,52 +536,6 @@ class MCEncodedFragment : public MCFragment {
       : MCFragment(FType, HasInstructions) {}
 };
 
-class MCAlignFragment : public MCFragment {
-  /// Flag to indicate that (optimal) NOPs should be emitted instead
-  /// of using the provided value. The exact interpretation of this flag is
-  /// target dependent.
-  bool EmitNops : 1;
-
-  /// The alignment to ensure, in bytes.
-  Align Alignment;
-
-  /// The size of the integer (in bytes) of \p Value.
-  uint8_t FillLen;
-
-  /// The maximum number of bytes to emit; if the alignment
-  /// cannot be satisfied in this width then this fragment is ignored.
-  unsigned MaxBytesToEmit;
-
-  /// Value to use for filling padding bytes.
-  int64_t Fill;
-
-  /// When emitting Nops some subtargets have specific nop encodings.
-  const MCSubtargetInfo *STI = nullptr;
-
-public:
-  MCAlignFragment(Align Alignment, int64_t Fill, uint8_t FillLen,
-                  unsigned MaxBytesToEmit)
-      : MCFragment(FT_Align, false), EmitNops(false), Alignment(Alignment),
-        FillLen(FillLen), MaxBytesToEmit(MaxBytesToEmit), Fill(Fill) {}
-
-  Align getAlignment() const { return Alignment; }
-  int64_t getFill() const { return Fill; }
-  uint8_t getFillLen() const { return FillLen; }
-  unsigned getMaxBytesToEmit() const { return MaxBytesToEmit; }
-
-  bool hasEmitNops() const { return EmitNops; }
-  void setEmitNops(bool Value, const MCSubtargetInfo *STI) {
-    EmitNops = Value;
-    this->STI = STI;
-  }
-
-  const MCSubtargetInfo *getSubtargetInfo() const { return STI; }
-
-  static bool classof(const MCFragment *F) {
-    return F->getKind() == MCFragment::FT_Align;
-  }
-};
-
 class MCFillFragment : public MCFragment {
   uint8_t ValueSize;
   /// Value to use for filling bytes.
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index d4d10e0cd74a5..a084af79e9ec8 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -228,25 +228,24 @@ uint64_t MCAssembler::computeFragmentSize(const MCFragment &F) const {
     return 4;
 
   case MCFragment::FT_Align: {
-    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
-    unsigned Offset = getFragmentOffset(AF);
-    unsigned Size = offsetToAlignment(Offset, AF.getAlignment());
+    unsigned Offset = F.Offset + F.getFixedSize();
+    unsigned Size = offsetToAlignment(Offset, F.getAlignment());
 
     // Insert extra Nops for code alignment if the target define
     // shouldInsertExtraNopBytesForCodeAlign target hook.
-    if (AF.getParent()->useCodeAlign() && AF.hasEmitNops() &&
-        getBackend().shouldInsertExtraNopBytesForCodeAlign(AF, Size))
-      return Size;
+    if (F.getParent()->useCodeAlign() && F.hasAlignEmitNops() &&
+        getBackend().shouldInsertExtraNopBytesForCodeAlign(F, Size))
+      return F.getFixedSize() + Size;
 
     // If we are padding with nops, force the padding to be larger than the
     // minimum nop size.
-    if (Size > 0 && AF.hasEmitNops()) {
+    if (Size > 0 && F.hasAlignEmitNops()) {
       while (Size % getBackend().getMinimumNopSize())
-        Size += AF.getAlignment().value();
+        Size += F.getAlignment().value();
     }
-    if (Size > AF.getMaxBytesToEmit())
-      return 0;
-    return Size;
+    if (Size > F.getAlignMaxBytesToEmit())
+      Size = 0;
+    return F.getFixedSize() + Size;
   }
 
   case MCFragment::FT_Org: {
@@ -416,6 +415,7 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
   switch (F.getKind()) {
   case MCFragment::FT_Data:
   case MCFragment::FT_Relaxable:
+  case MCFragment::FT_Align:
   case MCFragment::FT_LEB:
   case MCFragment::FT_Dwarf:
   case MCFragment::FT_DwarfFrame:
@@ -429,48 +429,46 @@ static void writeFragment(raw_ostream &OS, const MCAssembler &Asm,
     const auto &EF = cast<MCEncodedFragment>(F);
     OS << StringRef(EF.getContents().data(), EF.getContents().size());
     OS << StringRef(EF.getVarContents().data(), EF.getVarContents().size());
-    break;
-  }
-
-  case MCFragment::FT_Align: {
-    ++stats::EmittedAlignFragments;
-    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
-    assert(AF.getFillLen() && "Invalid virtual align in concrete fragment!");
-
-    uint64_t Count = FragmentSize / AF.getFillLen();
-    assert(FragmentSize % AF.getFillLen() == 0 &&
-           "computeFragmentSize computed size is incorrect");
-
-    // See if we are aligning with nops, and if so do that first to try to fill
-    // the Count bytes. Then if that did not fill any bytes or there are any
-    // bytes left to fill use the Value and ValueSize to fill the rest.
-    // If we are aligning with nops, ask that target to emit the right data.
-    if (AF.hasEmitNops()) {
-      if (!Asm.getBackend().writeNopData(OS, Count, AF.getSubtargetInfo()))
-        report_fatal_error("unable to write nop sequence of " +
-                           Twine(Count) + " bytes");
-      break;
-    }
-
-    // Otherwise, write out in multiples of the value size.
-    for (uint64_t i = 0; i != Count; ++i) {
-      switch (AF.getFillLen()) {
-      default: llvm_unreachable("Invalid size!");
-      case 1:
-        OS << char(AF.getFill());
-        break;
-      case 2:
-        support::endian::write<uint16_t>(OS, AF.getFill(), Endian);
-        break;
-      case 4:
-        support::endian::write<uint32_t>(OS, AF.getFill(), Endian);
-        break;
-      case 8:
-        support::endian::write<uint64_t>(OS, AF.getFill(), Endian);
-        break;
+    if (F.getKind() == MCFragment::FT_Align) {
+      ++stats::EmittedAlignFragments;
+      assert(F.getAlignFillLen() &&
+             "Invalid virtual align in concrete fragment!");
+
+      uint64_t Count = (FragmentSize - F.getFixedSize()) / F.getAlignFillLen();
+      assert((FragmentSize - F.getFixedSize()) % F.getAlignFillLen() == 0 &&
+             "computeFragmentSize computed size is incorrect");
+
+      // See if we are aligning with nops, and if so do that first to try to
+      // fill the Count bytes. Then if that did not fill any bytes or there are
+      // any bytes left to fill use the Value and ValueSize to fill the rest. If
+      // we are aligning with nops, ask that target to emit the right data.
+      if (F.hasAlignEmitNops()) {
+        if (!Asm.getBackend().writeNopData(OS, Count, F.getSubtargetInfo()))
+          report_fatal_error("unable to write nop sequence of " + Twine(Count) +
+                             " bytes");
+      } else {
+        // Otherwise, write out in multiples of the value size.
+        for (uint64_t i = 0; i != Count; ++i) {
+          switch (F.getAlignFillLen()) {
+          default:
+            llvm_unreachable("Invalid size!");
+          case 1:
+            OS << char(F.getAlignFill());
+            break;
+          case 2:
+            support::endian::write<uint16_t>(OS, F.getAlignFill(), Endian);
+            break;
+          case 4:
+            support::endian::write<uint32_t>(OS, F.getAlignFill(), Endian);
+            break;
+          case 8:
+            support::endian::write<uint64_t>(OS, F.getAlignFill(), Endian);
+            break;
+          }
+        }
      }
    }
-    break;
-  }
+  } break;
 
   case MCFragment::FT_Fill: {
     ++stats::EmittedFillFragments;
@@ -608,9 +606,7 @@ void MCAssembler::writeSectionData(raw_ostream &OS,
     case MCFragment::FT_Align:
       // Check that we aren't trying to write a non-zero value into a virtual
       // section.
-      assert((cast<MCAlignFragment>(F).getFillLen() == 0 ||
-              cast<MCAlignFragment>(F).getFill() == 0) &&
-             "Invalid align in virtual section!");
+      assert(F.getAlignFill() == 0 && "Invalid align in virtual section!");
       break;
     case MCFragment::FT_Fill:
       assert((cast<MCFillFragment>(F).getValue() == 0) &&
@@ -699,17 +695,22 @@ void MCAssembler::layout() {
   for (MCSection &Sec : *this) {
     for (MCFragment &F : Sec) {
       // Process fragments with fixups here.
-      if (F.isEncoded()) {
-        auto Contents = F.getContents();
-        for (MCFixup &Fixup : F.getFixups()) {
-          uint64_t FixedValue;
-          MCValue Target;
-          evaluateFixup(F, Fixup, Target, FixedValue,
-                        /*RecordReloc=*/true, Contents);
-        }
-        // In the variable part, fixup offsets are relative to the fixed part's
-        // start. Extend the variable contents to the left to account for the
-        // fixed part size.
+      auto Contents = F.getContents();
+      for (MCFixup &Fixup : F.getFixups()) {
+        uint64_t FixedValue;
+        MCValue Target;
+        evaluateFixup(F, Fixup, Target, FixedValue,
+                      /*RecordReloc=*/true, Contents);
+      }
+      if (F.getKind() == MCFragment::FT_Align) {
+        // For RISC-V linker relaxation, an alignment relocation might be
+        // needed.
+        if (F.hasAlignEmitNops())
+          getBackend().shouldInsertFixupForCodeAlign(*this, F);
+      } else if (F.getVarFixups().size()) {
+        // In the variable part, fixup offsets are relative to the fixed
+        // part's start. Extend the variable contents to the left to account
+        // for the fixed part size.
         Contents = MutableArrayRef(F.getParent()->ContentStorage)
                        .slice(F.VarContentStart - Contents.size(), F.getSize());
         for (MCFixup &Fixup : F.getVarFixups()) {
           uint64_t FixedValue;
           MCValue Target;
           evaluateFixup(F, Fixup, Target, FixedValue,
                         /*RecordReloc=*/true, Contents);
         }
-      } else if (auto *AF = dyn_cast<MCAlignFragment>(&F)) {
-        // For RISC-V linker relaxation, an alignment relocation might be
-        // needed.
-        if (AF->hasEmitNops())
-          getBackend().shouldInsertFixupForCodeAlign(*this, *AF);
       }
     }
   }
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 22dff497911de..f0f1bd485258f 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -379,11 +379,11 @@ static void attemptToFoldSymbolOffsetDifference(const MCAssembler *Asm,
       // After layout, during relocation generation, it can be treated as a
      // data fragment.
       Displacement += F->getSize();
-    } else if (auto *AF = dyn_cast<MCAlignFragment>(F);
-               AF && Layout && AF->hasEmitNops() &&
+    } else if (F->getKind() == MCFragment::FT_Align && Layout &&
+               F->hasAlignEmitNops() &&
                !Asm->getBackend().shouldInsertExtraNopBytesForCodeAlign(
-                   *AF, Count)) {
-      Displacement += Asm->computeFragmentSize(*AF);
+                   *F, Count)) {
+      Displacement += Asm->computeFragmentSize(*F);
     } else if (auto *FF = dyn_cast<MCFillFragment>(F);
                FF && FF->getNumValues().evaluateAsAbsolute(Num)) {
       Displacement += Num * FF->getValueSize();
diff --git a/llvm/lib/MC/MCFragment.cpp b/llvm/lib/MC/MCFragment.cpp
index bfe045abe6e53..d01660f640c46 100644
--- a/llvm/lib/MC/MCFragment.cpp
+++ b/llvm/lib/MC/MCFragment.cpp
@@ -73,17 +73,9 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
   };
 
   switch (getKind()) {
-  case MCFragment::FT_Align: {
-    const auto *AF = cast<MCAlignFragment>(this);
-    OS << " Align:" << AF->getAlignment().value() << " Fill:" << AF->getFill()
-       << " FillLen:" << unsigned(AF->getFillLen())
-       << " MaxBytesToEmit:" << AF->getMaxBytesToEmit();
-    if (AF->hasEmitNops())
-      OS << " Nops";
-    break;
-  }
   case MCFragment::FT_Data:
   case MCFragment::FT_Relaxable:
+  case MCFragment::FT_Align:
   case MCFragment::FT_LEB:
   case MCFragment::FT_Dwarf:
   case MCFragment::FT_DwarfFrame: {
@@ -112,6 +104,13 @@ LLVM_DUMP_METHOD void MCFragment::dump() const {
     OS << ' ';
     getInst().dump_pretty(OS);
     break;
+  case MCFragment::FT_Align:
+    OS << "\n Align:" << getAlignment().value() << " Fill:" << getAlignFill()
+       << " FillLen:" << unsigned(getAlignFillLen())
+       << " MaxBytesToEmit:" << getAlignMaxBytesToEmit();
+    if (hasAlignEmitNops())
+      OS << " Nops";
+    break;
   case MCFragment::FT_LEB: {
     OS << " Value:";
     getLEBValue().print(OS, nullptr);
diff --git a/llvm/lib/MC/MCObjectStreamer.cpp b/llvm/lib/MC/MCObjectStreamer.cpp
index c0cef0f06c57a..056b30b7df6f2 100644
--- a/llvm/lib/MC/MCObjectStreamer.cpp
+++ b/llvm/lib/MC/MCObjectStreamer.cpp
@@ -579,8 +579,8 @@ void MCObjectStreamer::emitValueToAlignment(Align Alignment, int64_t Fill,
                                             unsigned MaxBytesToEmit) {
   if (MaxBytesToEmit == 0)
     MaxBytesToEmit = Alignment.value();
-  insert(getContext().allocFragment<MCAlignFragment>(Alignment, Fill, FillLen,
-                                                     MaxBytesToEmit));
+  MCFragment *F = getOrCreateDataFragment();
+  F->makeAlign(Alignment, Fill, FillLen, MaxBytesToEmit);
 
   // Update the maximum alignment on the current section if necessary.
   MCSection *CurSec = getCurrentSectionOnly();
@@ -591,8 +591,8 @@ void MCObjectStreamer::emitCodeAlignment(Align Alignment,
                                          const MCSubtargetInfo *STI,
                                          unsigned MaxBytesToEmit) {
   emitValueToAlignment(Alignment, 0, 1, MaxBytesToEmit);
-  auto *F = cast<MCAlignFragment>(getCurrentFragment());
-  F->setEmitNops(true, STI);
+  auto *F = getCurrentFragment();
+  F->setAlignEmitNops(true, STI);
   // With RISC-V style linker relaxation, mark the section as linker-relaxable
   // if the alignment is larger than the minimum NOP size.
   unsigned Size;
diff --git a/llvm/lib/MC/WasmObjectWriter.cpp b/llvm/lib/MC/WasmObjectWriter.cpp
index 7af240a73f952..c0fad137b9037 100644
--- a/llvm/lib/MC/WasmObjectWriter.cpp
+++ b/llvm/lib/MC/WasmObjectWriter.cpp
@@ -696,14 +696,15 @@ static void addData(SmallVectorImpl<char> &DataBytes,
     if (Frag.hasInstructions())
       report_fatal_error("only data supported in data sections");
 
-    if (auto *Align = dyn_cast<MCAlignFragment>(&Frag)) {
-      if (Align->getFillLen() != 1)
+    llvm::append_range(DataBytes, Frag.getContents());
+    if (Frag.getKind() == MCFragment::FT_Align) {
+      if (Frag.getAlignFillLen() != 1)
         report_fatal_error("only byte values supported for alignment");
       // If nops are requested, use zeros, as this is the data section.
-      uint8_t Value = Align->hasEmitNops() ? 0 : Align->getFill();
+      uint8_t Value = Frag.hasAlignEmitNops() ? 0 : Frag.getAlignFill();
       uint64_t Size =
-          std::min(alignTo(DataBytes.size(), Align->getAlignment()),
-                   DataBytes.size() + Align->getMaxBytesToEmit());
+          std::min(alignTo(DataBytes.size(), Frag.getAlignment()),
+                   DataBytes.size() + Frag.getAlignMaxBytesToEmit());
       DataBytes.resize(Size, Value);
     } else if (auto *Fill = dyn_cast<MCFillFragment>(&Frag)) {
       int64_t NumValues;
       if (!Fill->getNumValues().evaluateAsAbsolute(NumValues))
         llvm_unreachable("The fill should be an assembler constant");
       DataBytes.insert(DataBytes.end(), Fill->getValueSize() * NumValues,
                        Fill->getValue());
+    } else if (Frag.getKind() == MCFragment::FT_LEB) {
+      llvm::append_range(DataBytes, Frag.getVarContents());
     } else {
-      llvm::append_range(DataBytes, Frag.getContents());
-      if (Frag.getKind() == MCFragment::FT_LEB)
-        llvm::append_range(DataBytes, Frag.getVarContents());
-      else
-        assert(Frag.getKind() == MCFragment::FT_Data);
+      assert(Frag.getKind() == MCFragment::FT_Data);
     }
   }
@@ -1867,8 +1866,7 @@ uint64_t WasmObjectWriter::writeOneObject(MCAssembler &Asm,
         const MCFragment &AlignFrag = *nextFrag;
         if (AlignFrag.getKind() != MCFragment::FT_Align)
           report_fatal_error(".init_array section should be aligned");
-        if (cast<MCAlignFragment>(AlignFrag).getAlignment() !=
-            Align(is64Bit() ? 8 : 4))
+        if (AlignFrag.getAlignment() != Align(is64Bit() ? 8 : 4))
           report_fatal_error(
               ".init_array section should be aligned for pointers");
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
index 7b9f1156f9102..9ed17a9b39516 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.cpp
@@ -182,14 +182,14 @@ void LoongArchAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
 // could satisfy alignment by removing Nops.
 // The function returns the total Nops Size we need to insert.
 bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
-    const MCAlignFragment &AF, unsigned &Size) {
+    const MCFragment &AF, unsigned &Size) {
   // Calculate Nops Size only when linker relaxation enabled.
   if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
     return false;
 
   // Ignore alignment if MaxBytesToEmit is less than the minimum Nop size.
   const unsigned MinNopLen = 4;
-  if (AF.getMaxBytesToEmit() < MinNopLen)
+  if (AF.getAlignMaxBytesToEmit() < MinNopLen)
     return false;
   Size = AF.getAlignment().value() - MinNopLen;
   return AF.getAlignment() > MinNopLen;
@@ -205,7 +205,7 @@ bool LoongArchAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
 // maximum number of bytes to emit. The maximum number of bytes is zero
 // means ignore the emit limit.
 bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                                        MCAlignFragment &AF) {
+                                                        MCFragment &AF) {
   // Insert the fixup only when linker relaxation enabled.
   if (!AF.getSubtargetInfo()->hasFeature(LoongArch::FeatureRelax))
     return false;
 
@@ -220,7 +220,7 @@ bool LoongArchAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
   MCContext &Ctx = getContext();
   const MCExpr *Dummy = MCConstantExpr::create(0, Ctx);
   MCFixup Fixup = MCFixup::create(0, Dummy, ELF::R_LARCH_ALIGN);
-  unsigned MaxBytesToEmit = AF.getMaxBytesToEmit();
+  unsigned MaxBytesToEmit = AF.getAlignMaxBytesToEmit();
 
   auto createExtendedValue = [&]() {
     const MCSymbolRefExpr *MCSym = getSecToAlignSym()[Sec];
@@ -434,7 +434,7 @@ bool LoongArchAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
 
   // Otherwise, check if the offset between the symbol and fragment is fully
   // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
-  // offset-affected MCAlignFragment). Complements the generic
+  // offset-affected FT_Align fragments). Complements the generic
   // isSymbolRefDifferenceFullyResolvedImpl.
   if (!PCRelTemp)
     PCRelTemp = getContext().createTempSymbol();
diff --git a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
index b32ba067810ce..793e4093b1c9e 100644
--- a/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
+++ b/llvm/lib/Target/LoongArch/MCTargetDesc/LoongArchAsmBackend.h
@@ -46,12 +46,11 @@ class LoongArchAsmBackend : public MCAsmBackend {
                   bool IsResolved) override;
 
   // Return Size with extra Nop Bytes for alignment directive in code section.
-  bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+  bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
                                              unsigned &Size) override;
 
   // Insert target specific fixup type for alignment directive in code section.
-  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                     MCAlignFragment &AF) override;
+  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) override;
 
   bool shouldForceRelocation(const MCFixup &Fixup, const MCValue &Target);
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
index f76f8b3060d2a..cff8d47d86e1c 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -637,7 +637,7 @@ bool RISCVAsmBackend::isPCRelFixupResolved(const MCSymbol *SymA,
 
   // Otherwise, check if the offset between the symbol and fragment is fully
   // resolved, unaffected by linker-relaxable fragments (e.g. instructions or
-  // offset-affected MCAlignFragment). Complements the generic
+  // offset-affected FT_Align fragments). Complements the generic
   // isSymbolRefDifferenceFullyResolvedImpl.
   if (!PCRelTemp)
     PCRelTemp = getContext().createTempSymbol();
@@ -892,7 +892,7 @@ void RISCVAsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
 // could satisfy alignment by removing Nops.
 // The function return the total Nops Size we need to insert.
 bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
-    const MCAlignFragment &AF, unsigned &Size) {
+    const MCFragment &AF, unsigned &Size) {
   // Calculate Nops Size only when linker relaxation enabled.
   const MCSubtargetInfo *STI = AF.getSubtargetInfo();
   if (!STI->hasFeature(RISCV::FeatureRelax))
@@ -914,7 +914,7 @@ bool RISCVAsmBackend::shouldInsertExtraNopBytesForCodeAlign(
 // The function insert fixup_riscv_align fixup which eventually will
 // transfer to R_RISCV_ALIGN relocation type.
 bool RISCVAsmBackend::shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                                    MCAlignFragment &AF) {
+                                                    MCFragment &AF) {
   // Insert the fixup only when linker relaxation enabled.
   const MCSubtargetInfo *STI = AF.getSubtargetInfo();
   if (!STI->hasFeature(RISCV::FeatureRelax))
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
index 8c10fbec3c8fc..c4a1c74aa2c54 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
@@ -39,12 +39,11 @@ class RISCVAsmBackend : public MCAsmBackend {
   ~RISCVAsmBackend() override = default;
 
   // Return Size with extra Nop Bytes for alignment directive in code section.
-  bool shouldInsertExtraNopBytesForCodeAlign(const MCAlignFragment &AF,
+  bool shouldInsertExtraNopBytesForCodeAlign(const MCFragment &AF,
                                              unsigned &Size) override;
 
   // Insert target specific fixup type for alignment directive in code section.
-  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm,
-                                     MCAlignFragment &AF) override;
+  bool shouldInsertFixupForCodeAlign(MCAssembler &Asm, MCFragment &AF) override;
 
   std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
                                     uint64_t &) override;
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 3d060c6f4a780..704257fe54242 100644
--- a/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -923,13 +923,11 @@ bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
       continue;
     }
 
-    const uint64_t OrigSize = Asm.computeFragmentSize(F);
-
     // To keep the effects local, prefer to relax instructions closest to
     // the align directive. This is purely about human understandability
     // of the resulting code. If we later find a reason to expand
    // particular instructions over others, we can adjust.
-    unsigned RemainingSize = OrigSize;
+    unsigned RemainingSize = Asm.computeFragmentSize(F) - F.getFixedSize();
     while (!Relaxable.empty() && RemainingSize != 0) {
       auto &RF = *Relaxable.pop_back_val();
       // Give the backend a chance to play any tricks it wishes to increase
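
Usage sketch, not part of the diff: after this patch an alignment directive no longer allocates a dedicated MCAlignFragment; the streamer tags the current fragment as FT_Align in place. A minimal sketch of the consolidated API, written against the declarations above from inside an MCObjectStreamer-derived class; the 16-byte alignment, 0x90 fill byte, 32-byte cap, and the STI variable are illustrative assumptions mirroring emitValueToAlignment()/emitCodeAlignment():

  // Turn the current data fragment into an FT_Align fragment: align to
  // 16 bytes, padding with single 0x90 bytes (FillLen of 1), and emit
  // nothing if more than 32 bytes of padding would be required.
  MCFragment *F = getOrCreateDataFragment();
  F->makeAlign(Align(16), /*Fill=*/0x90, /*FillLen=*/1, /*MaxBytesToEmit=*/32);

  // In code sections, request target nops instead of the fill value; the
  // MCSubtargetInfo selects the nop encoding, and backends such as RISC-V
  // may later attach an R_RISCV_ALIGN fixup to this fragment.
  F->setAlignEmitNops(true, STI);

Layout-wise, computeFragmentSize() now reports the fragment's fixed contents plus the padding tail, which is why callers that want only the padding (e.g. X86AsmBackend::finishLayout above) subtract F.getFixedSize().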