Move size and alignment information of regclass to TargetRegisterInfo
1. TargetRegisterClass::getSize() is split into two functions:
   - TargetRegisterInfo::getRegSizeInBits(const TargetRegisterClass &RC) const;
   - TargetRegisterInfo::getSpillSize(const TargetRegisterClass &RC) const;
   Note the unit change: getRegSizeInBits() reports the register width in
   bits, where getSize() reported bytes; getSpillSize() stays in bytes.
2. TargetRegisterClass::getAlignment() is replaced by:
   - TargetRegisterInfo::getSpillAlignment(const TargetRegisterClass &RC) const;
This will allow those values to depend on subtarget features in the
future.
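
For illustration, a typical call-site migration looks like this (a sketch
distilled from the changes below, not code from any one file; MF, RC and
TRI stand for the usual machine function, register class and register
info variables):

  // Before: both queries lived on the register class, in bytes.
  unsigned Size  = RC->getSize();
  unsigned Align = RC->getAlignment();

  // After: queries go through TargetRegisterInfo. getRegSizeInBits()
  // reports bits, so byte-based call sites divide by 8; the spill
  // queries remain in bytes.
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  unsigned RegBits    = TRI->getRegSizeInBits(*RC);
  unsigned SpillSize  = TRI->getSpillSize(*RC);
  unsigned SpillAlign = TRI->getSpillAlignment(*RC);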
Differential Revision: https://reviews.llvm.org/D31783
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@301221 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index 51ae178..f4a6766 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -117,8 +117,9 @@
// Otherwise, attempt to find a covering set of sub-register numbers.
// For example, Q0 on ARM is a composition of D0+D1.
unsigned CurPos = 0;
- // The size of the register in bits, assuming 8 bits per byte.
- unsigned RegSize = TRI.getMinimalPhysRegClass(MachineReg)->getSize() * 8;
+ // The size of the register in bits.
+ const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
+ unsigned RegSize = TRI.getRegSizeInBits(*RC);
// Keep track of the bits in the register we already emitted, so we
// can avoid emitting redundant aliasing subregs.
SmallBitVector Coverage(RegSize, false);
diff --git a/lib/CodeGen/GlobalISel/InstructionSelect.cpp b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
index 26454c1..cf97c63 100644
--- a/lib/CodeGen/GlobalISel/InstructionSelect.cpp
+++ b/lib/CodeGen/GlobalISel/InstructionSelect.cpp
@@ -145,6 +145,8 @@
}
}
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+
// Now that selection is complete, there are no more generic vregs. Verify
// that the size of the now-constrained vreg is unchanged and that it has a
// register class.
@@ -165,7 +167,7 @@
continue;
if (VRegToType.second.isValid() &&
- VRegToType.second.getSizeInBits() > (RC->getSize() * 8)) {
+ VRegToType.second.getSizeInBits() > TRI.getRegSizeInBits(*RC)) {
reportGISelFailure(MF, TPC, MORE, "gisel-select",
"VReg has explicit size different from class size",
*MI);
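Note: the verifier now compares like units (LLT bits against class bits)
instead of scaling the class size by 8 at every use. The invariant being
enforced, as a sketch:

  // A constrained vreg's explicit LLT may not be wider than its class.
  assert(VRegToType.second.getSizeInBits() <= TRI.getRegSizeInBits(*RC) &&
         "VReg has explicit size different from class size");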
diff --git a/lib/CodeGen/GlobalISel/RegisterBank.cpp b/lib/CodeGen/GlobalISel/RegisterBank.cpp
index 940957d..83b21e6 100644
--- a/lib/CodeGen/GlobalISel/RegisterBank.cpp
+++ b/lib/CodeGen/GlobalISel/RegisterBank.cpp
@@ -48,7 +48,7 @@
// Verify that the Size of the register bank is big enough to cover
// all the register classes it covers.
- assert((getSize() >= SubRC.getSize() * 8) &&
+ assert(getSize() >= TRI.getRegSizeInBits(SubRC) &&
"Size is not big enough for all the subclasses!");
assert(covers(SubRC) && "Not all subclasses are covered");
}
diff --git a/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp b/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
index b2df2f1..d5ae9a6 100644
--- a/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
+++ b/lib/CodeGen/GlobalISel/RegisterBankInfo.cpp
@@ -421,7 +421,7 @@
RC = MRI.getRegClass(Reg);
}
assert(RC && "Unable to deduce the register class");
- return RC->getSize() * 8;
+ return TRI.getRegSizeInBits(*RC);
}
//------------------------------------------------------------------------------
diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp
index 1354009..570a0cd 100644
--- a/lib/CodeGen/PrologEpilogInserter.cpp
+++ b/lib/CodeGen/PrologEpilogInserter.cpp
@@ -373,22 +373,22 @@
FixedSlot->Reg != Reg)
++FixedSlot;
+ unsigned Size = RegInfo->getSpillSize(*RC);
if (FixedSlot == FixedSpillSlots + NumFixedSpillSlots) {
// Nope, just spill it anywhere convenient.
- unsigned Align = RC->getAlignment();
+ unsigned Align = RegInfo->getSpillAlignment(*RC);
unsigned StackAlign = TFI->getStackAlignment();
// We may not be able to satisfy the desired alignment specification of
// the TargetRegisterClass if the stack alignment is smaller. Use the
// min.
Align = std::min(Align, StackAlign);
- FrameIdx = MFI.CreateStackObject(RC->getSize(), Align, true);
+ FrameIdx = MFI.CreateStackObject(Size, Align, true);
if ((unsigned)FrameIdx < MinCSFrameIndex) MinCSFrameIndex = FrameIdx;
if ((unsigned)FrameIdx > MaxCSFrameIndex) MaxCSFrameIndex = FrameIdx;
} else {
// Spill it to the stack where we must.
- FrameIdx =
- MFI.CreateFixedSpillStackObject(RC->getSize(), FixedSlot->Offset);
+ FrameIdx = MFI.CreateFixedSpillStackObject(Size, FixedSlot->Offset);
}
CS.setFrameIdx(FrameIdx);
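Note: this is the spill-slot creation pattern that recurs throughout the
patch (RegAllocFast, VirtRegMap and several target frame lowerings below).
As a sketch, with a hypothetical helper name:

  // Illustrative helper only; not part of this patch.
  static int createSpillSlot(MachineFrameInfo &MFI,
                             const TargetRegisterInfo &TRI,
                             const TargetRegisterClass &RC) {
    unsigned Size  = TRI.getSpillSize(RC);       // bytes
    unsigned Align = TRI.getSpillAlignment(RC);  // bytes
    return MFI.CreateSpillStackObject(Size, Align);
  }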
diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp
index 283d846..c606b7b 100644
--- a/lib/CodeGen/RegAllocFast.cpp
+++ b/lib/CodeGen/RegAllocFast.cpp
@@ -212,8 +212,9 @@
return SS; // Already has space allocated?
// Allocate a new stack object for this spill location...
- int FrameIdx = MF->getFrameInfo().CreateSpillStackObject(RC->getSize(),
- RC->getAlignment());
+ unsigned Size = TRI->getSpillSize(*RC);
+ unsigned Align = TRI->getSpillAlignment(*RC);
+ int FrameIdx = MF->getFrameInfo().CreateSpillStackObject(Size, Align);
// Assign the slot.
StackSlotForVirtReg[VirtReg] = FrameIdx;
diff --git a/lib/CodeGen/RegisterScavenging.cpp b/lib/CodeGen/RegisterScavenging.cpp
index 6392136..35db30f 100644
--- a/lib/CodeGen/RegisterScavenging.cpp
+++ b/lib/CodeGen/RegisterScavenging.cpp
@@ -395,8 +395,8 @@
// Find an available scavenging slot with size and alignment matching
// the requirements of the class RC.
const MachineFrameInfo &MFI = MF.getFrameInfo();
- unsigned NeedSize = RC->getSize();
- unsigned NeedAlign = RC->getAlignment();
+ unsigned NeedSize = TRI->getSpillSize(*RC);
+ unsigned NeedAlign = TRI->getSpillAlignment(*RC);
unsigned SI = Scavenged.size(), Diff = std::numeric_limits<unsigned>::max();
int FIB = MFI.getObjectIndexBegin(), FIE = MFI.getObjectIndexEnd();
diff --git a/lib/CodeGen/StackMaps.cpp b/lib/CodeGen/StackMaps.cpp
index 1a8ec5b..315b059 100644
--- a/lib/CodeGen/StackMaps.cpp
+++ b/lib/CodeGen/StackMaps.cpp
@@ -161,7 +161,8 @@
if (SubRegIdx)
Offset = TRI->getSubRegIdxOffset(SubRegIdx);
- Locs.emplace_back(Location::Register, RC->getSize(), DwarfRegNum, Offset);
+ Locs.emplace_back(Location::Register, TRI->getSpillSize(*RC),
+ DwarfRegNum, Offset);
return ++MOI;
}
@@ -245,7 +246,7 @@
StackMaps::LiveOutReg
StackMaps::createLiveOutReg(unsigned Reg, const TargetRegisterInfo *TRI) const {
unsigned DwarfRegNum = getDwarfRegNum(Reg, TRI);
- unsigned Size = TRI->getMinimalPhysRegClass(Reg)->getSize();
+ unsigned Size = TRI->getSpillSize(*TRI->getMinimalPhysRegClass(Reg));
return LiveOutReg(Reg, DwarfRegNum, Size);
}
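Note: stackmap location sizes are recorded in bytes, so getSpillSize()
(bytes) is the drop-in replacement here. For current targets it typically
coincides with getRegSizeInBits() / 8, but the split leaves room for the
two to diverge per subtarget. Sketch:

  // Location size in bytes for a physical register.
  const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
  unsigned SizeInBytes = TRI->getSpillSize(*RC);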
diff --git a/lib/CodeGen/TargetInstrInfo.cpp b/lib/CodeGen/TargetInstrInfo.cpp
index 69b2517..14c5adc 100644
--- a/lib/CodeGen/TargetInstrInfo.cpp
+++ b/lib/CodeGen/TargetInstrInfo.cpp
@@ -345,12 +345,12 @@
unsigned SubIdx, unsigned &Size,
unsigned &Offset,
const MachineFunction &MF) const {
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
if (!SubIdx) {
- Size = RC->getSize();
+ Size = TRI->getSpillSize(*RC);
Offset = 0;
return true;
}
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
// Convert bit size to byte size to be consistent with
// MCRegisterClass::getSize().
@@ -364,10 +364,10 @@
Size = BitSize /= 8;
Offset = (unsigned)BitOffset / 8;
- assert(RC->getSize() >= (Offset + Size) && "bad subregister range");
+ assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");
if (!MF.getDataLayout().isLittleEndian()) {
- Offset = RC->getSize() - (Offset + Size);
+ Offset = TRI->getSpillSize(*RC) - (Offset + Size);
}
return true;
}
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 27630a3..8d7222f 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -1299,7 +1299,7 @@
for (int i = SuperRegRC.find_first(); i >= 0; i = SuperRegRC.find_next(i)) {
const TargetRegisterClass *SuperRC = TRI->getRegClass(i);
// We want the largest possible spill size.
- if (SuperRC->getSize() <= BestRC->getSize())
+ if (TRI->getSpillSize(*SuperRC) <= TRI->getSpillSize(*BestRC))
continue;
if (!isLegalRC(SuperRC))
continue;
diff --git a/lib/CodeGen/TargetRegisterInfo.cpp b/lib/CodeGen/TargetRegisterInfo.cpp
index 66cdad2..ae579af 100644
--- a/lib/CodeGen/TargetRegisterInfo.cpp
+++ b/lib/CodeGen/TargetRegisterInfo.cpp
@@ -265,7 +265,7 @@
const TargetRegisterClass *BestRC = nullptr;
unsigned *BestPreA = &PreA;
unsigned *BestPreB = &PreB;
- if (RCA->getSize() < RCB->getSize()) {
+ if (getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB)) {
std::swap(RCA, RCB);
std::swap(SubA, SubB);
std::swap(BestPreA, BestPreB);
@@ -273,7 +273,7 @@
// Also terminate the search once we have found a register class as small as
// RCA.
- unsigned MinSize = RCA->getSize();
+ unsigned MinSize = getRegSizeInBits(*RCA);
for (SuperRegClassIterator IA(RCA, this, true); IA.isValid(); ++IA) {
unsigned FinalA = composeSubRegIndices(IA.getSubReg(), SubA);
@@ -281,7 +281,7 @@
// Check if a common super-register class exists for this index pair.
const TargetRegisterClass *RC =
firstCommonClass(IA.getMask(), IB.getMask(), this);
- if (!RC || RC->getSize() < MinSize)
+ if (!RC || getRegSizeInBits(*RC) < MinSize)
continue;
// The indexes must compose identically: PreA+SubA == PreB+SubB.
@@ -290,7 +290,7 @@
continue;
// Is RC a better candidate than BestRC?
- if (BestRC && RC->getSize() >= BestRC->getSize())
+ if (BestRC && getRegSizeInBits(*RC) >= getRegSizeInBits(*BestRC))
continue;
// Yes, RC is the smallest super-register seen so far.
@@ -299,7 +299,7 @@
*BestPreB = IB.getSubReg();
// Bail early if we reached MinSize. We won't find a better candidate.
- if (BestRC->getSize() == MinSize)
+ if (getRegSizeInBits(*BestRC) == MinSize)
return BestRC;
}
}
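Note: these hunks change only the unit of comparison. Every operand moves
from bytes to bits together, so relative ordering is preserved:

  // Sketch: bits = 8 * bytes on both sides, hence for any classes A, B
  //   A->getSize() < B->getSize()
  //     <=> getRegSizeInBits(*A) < getRegSizeInBits(*B).
  bool Smaller = getRegSizeInBits(*RCA) < getRegSizeInBits(*RCB);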
diff --git a/lib/CodeGen/VirtRegMap.cpp b/lib/CodeGen/VirtRegMap.cpp
index c894601..d10ca1a 100644
--- a/lib/CodeGen/VirtRegMap.cpp
+++ b/lib/CodeGen/VirtRegMap.cpp
@@ -73,8 +73,9 @@
}
unsigned VirtRegMap::createSpillSlot(const TargetRegisterClass *RC) {
- int SS = MF->getFrameInfo().CreateSpillStackObject(RC->getSize(),
- RC->getAlignment());
+ unsigned Size = TRI->getSpillSize(*RC);
+ unsigned Align = TRI->getSpillAlignment(*RC);
+ int SS = MF->getFrameInfo().CreateSpillStackObject(Size, Align);
++NumSpillSlots;
return SS;
}
diff --git a/lib/Target/AArch64/AArch64FrameLowering.cpp b/lib/Target/AArch64/AArch64FrameLowering.cpp
index 400e307..dc916c0 100644
--- a/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -1203,8 +1203,11 @@
// If we didn't find an extra callee-saved register to spill, create
// an emergency spill slot.
if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
- const TargetRegisterClass *RC = &AArch64::GPR64RegClass;
- int FI = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), false);
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ const TargetRegisterClass &RC = AArch64::GPR64RegClass;
+ unsigned Size = TRI->getSpillSize(RC);
+ unsigned Align = TRI->getSpillAlignment(RC);
+ int FI = MFI.CreateStackObject(Size, Align, false);
RS->addScavengingFrameIndex(FI);
DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
<< " as the emergency spill slot.\n");
diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 57ff463f..cb26882 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2320,7 +2320,7 @@
PtrInfo, MachineMemOperand::MOStore, MFI.getObjectSize(FI), Align);
unsigned Opc = 0;
bool Offset = true;
- switch (RC->getSize()) {
+ switch (TRI->getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::STRBui;
@@ -2424,7 +2424,7 @@
unsigned Opc = 0;
bool Offset = true;
- switch (RC->getSize()) {
+ switch (TRI->getSpillSize(*RC)) {
case 1:
if (AArch64::FPR8RegClass.hasSubClassEq(RC))
Opc = AArch64::LDRBui;
@@ -2649,7 +2649,8 @@
};
if (DstMO.getSubReg() == 0 && SrcMO.getSubReg() == 0) {
- assert(getRegClass(DstReg)->getSize() == getRegClass(SrcReg)->getSize() &&
+ assert(TRI.getRegSizeInBits(*getRegClass(DstReg)) ==
+ TRI.getRegSizeInBits(*getRegClass(SrcReg)) &&
"Mismatched register size in non subreg COPY");
if (IsSpill)
storeRegToStackSlot(MBB, InsertPt, SrcReg, SrcMO.isKill(), FrameIndex,
@@ -2735,7 +2736,8 @@
}
if (FillRC) {
- assert(getRegClass(SrcReg)->getSize() == FillRC->getSize() &&
+ assert(TRI.getRegSizeInBits(*getRegClass(SrcReg)) ==
+ TRI.getRegSizeInBits(*FillRC) &&
"Mismatched regclass size on folded subreg COPY");
loadRegFromStackSlot(MBB, InsertPt, DstReg, FrameIndex, FillRC, &TRI);
MachineInstr &LoadMI = *--InsertPt;
diff --git a/lib/Target/AMDGPU/GCNRegPressure.cpp b/lib/Target/AMDGPU/GCNRegPressure.cpp
index 4ecfa11..bf16a82 100644
--- a/lib/Target/AMDGPU/GCNRegPressure.cpp
+++ b/lib/Target/AMDGPU/GCNRegPressure.cpp
@@ -83,8 +83,8 @@
const auto RC = MRI.getRegClass(Reg);
auto STI = static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
return STI->isSGPRClass(RC) ?
- (RC->getSize() == 4 ? SGPR32 : SGPR_TUPLE) :
- (RC->getSize() == 4 ? VGPR32 : VGPR_TUPLE);
+ (STI->getRegSizeInBits(*RC) == 32 ? SGPR32 : SGPR_TUPLE) :
+ (STI->getRegSizeInBits(*RC) == 32 ? VGPR32 : VGPR_TUPLE);
}
void GCNRegPressure::inc(unsigned Reg,
diff --git a/lib/Target/AMDGPU/SIFrameLowering.cpp b/lib/Target/AMDGPU/SIFrameLowering.cpp
index abe6af9..9c21518 100644
--- a/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -469,7 +469,7 @@
// this also ensures we shouldn't need a register for the offset when
// emergency scavenging.
int ScavengeFI = MFI.CreateFixedObject(
- AMDGPU::SGPR_32RegClass.getSize(), 0, false);
+ TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
RS->addScavengingFrameIndex(ScavengeFI);
}
}
diff --git a/lib/Target/AMDGPU/SIISelLowering.cpp b/lib/Target/AMDGPU/SIISelLowering.cpp
index c2e4613..ce74a7c 100644
--- a/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1644,7 +1644,7 @@
const TargetRegisterClass *SuperRC,
unsigned VecReg,
int Offset) {
- int NumElts = SuperRC->getSize() / 4;
+ int NumElts = TRI.getRegSizeInBits(*SuperRC) / 32;
// Skip out of bounds offsets, or else we would end up using an undefined
// register.
@@ -1793,17 +1793,18 @@
return LoopBB;
}
-static unsigned getMOVRELDPseudo(const TargetRegisterClass *VecRC) {
- switch (VecRC->getSize()) {
- case 4:
+static unsigned getMOVRELDPseudo(const SIRegisterInfo &TRI,
+ const TargetRegisterClass *VecRC) {
+ switch (TRI.getRegSizeInBits(*VecRC)) {
+ case 32: // 4 bytes
return AMDGPU::V_MOVRELD_B32_V1;
- case 8:
+ case 64: // 8 bytes
return AMDGPU::V_MOVRELD_B32_V2;
- case 16:
+ case 128: // 16 bytes
return AMDGPU::V_MOVRELD_B32_V4;
- case 32:
+ case 256: // 32 bytes
return AMDGPU::V_MOVRELD_B32_V8;
- case 64:
+ case 512: // 64 bytes
return AMDGPU::V_MOVRELD_B32_V16;
default:
llvm_unreachable("unsupported size for MOVRELD pseudos");
@@ -1863,7 +1864,7 @@
BuildMI(MBB, I, DL, TII->get(AMDGPU::S_SET_GPR_IDX_OFF));
} else {
- const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));
+ const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
BuildMI(MBB, I, DL, MovRelDesc)
.addReg(Dst, RegState::Define)
@@ -1907,7 +1908,7 @@
.addReg(PhiReg, RegState::Implicit)
.addReg(AMDGPU::M0, RegState::Implicit);
} else {
- const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(VecRC));
+ const MCInstrDesc &MovRelDesc = TII->get(getMOVRELDPseudo(TRI, VecRC));
BuildMI(*LoopBB, InsPt, DL, MovRelDesc)
.addReg(Dst, RegState::Define)
diff --git a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index c2a3e62..9122cd7 100644
--- a/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -428,8 +428,8 @@
const MachineInstr &MIA = *MI;
const TargetRegisterClass *RC = TII->getOpRegClass(MIA, OpNo);
- unsigned Size = RC->getSize();
- Result.second = Result.first + (Size / 4);
+ unsigned Size = TRI->getRegSizeInBits(*RC);
+ Result.second = Result.first + (Size / 32);
return Result;
}
diff --git a/lib/Target/AMDGPU/SIInsertWaits.cpp b/lib/Target/AMDGPU/SIInsertWaits.cpp
index 47257ce..9f32ecf 100644
--- a/lib/Target/AMDGPU/SIInsertWaits.cpp
+++ b/lib/Target/AMDGPU/SIInsertWaits.cpp
@@ -216,8 +216,8 @@
// XXX - What if this is a write into a super register?
const TargetRegisterClass *RC = TII->getOpRegClass(MI, 0);
- unsigned Size = RC->getSize();
- Result.Named.LGKM = Size > 4 ? 2 : 1;
+ unsigned Size = TRI->getRegSizeInBits(*RC);
+ Result.Named.LGKM = Size > 32 ? 2 : 1;
} else {
// s_dcache_inv etc. do not have a destination register. Assume we
// want a wait on these.
@@ -289,12 +289,12 @@
RegInterval SIInsertWaits::getRegInterval(const TargetRegisterClass *RC,
const MachineOperand &Reg) const {
- unsigned Size = RC->getSize();
- assert(Size >= 4);
+ unsigned Size = TRI->getRegSizeInBits(*RC);
+ assert(Size >= 32);
RegInterval Result;
Result.first = TRI->getEncodingValue(Reg.getReg());
- Result.second = Result.first + Size / 4;
+ Result.second = Result.first + Size / 32;
return Result;
}
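Note: register intervals here are measured in 32-bit dwords, so the
divisor scales with the unit: bytes / 4 before, bits / 32 now. Sketch:

  // Dword count of a register class; both forms count the same thing.
  unsigned NumDWords = TRI->getRegSizeInBits(*RC) / 32; // == old getSize() / 4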
diff --git a/lib/Target/AMDGPU/SIInstrInfo.cpp b/lib/Target/AMDGPU/SIInstrInfo.cpp
index c5af8a1..92e452a 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.cpp
+++ b/lib/Target/AMDGPU/SIInstrInfo.cpp
@@ -250,11 +250,11 @@
unsigned EltSize;
if (LdSt.mayLoad())
- EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
+ EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
else {
assert(LdSt.mayStore());
int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
- EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
+ EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
}
if (isStride64(Opc))
@@ -350,7 +350,7 @@
FirstLdSt.getParent()->getParent()->getRegInfo();
const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());
- return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
+ return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}
static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
@@ -438,7 +438,7 @@
unsigned EltSize = 4;
unsigned Opcode = AMDGPU::V_MOV_B32_e32;
if (RI.isSGPRClass(RC)) {
- if (RC->getSize() > 4) {
+ if (RI.getRegSizeInBits(*RC) > 32) {
Opcode = AMDGPU::S_MOV_B64;
EltSize = 8;
} else {
@@ -498,11 +498,11 @@
unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
- if (DstRC->getSize() == 4) {
+ if (RI.getRegSizeInBits(*DstRC) == 32) {
return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
- } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
+ } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
return AMDGPU::S_MOV_B64;
- } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
+ } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
return AMDGPU::V_MOV_B64_PSEUDO;
}
return AMDGPU::COPY;
@@ -562,17 +562,18 @@
MachineMemOperand *MMO
= MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
Size, Align);
+ unsigned SpillSize = TRI->getSpillSize(*RC);
if (RI.isSGPRClass(RC)) {
MFI->setHasSpilledSGPRs();
// We are only allowed to create one new instruction when spilling
// registers, so we need to use pseudo instruction for spilling SGPRs.
- const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));
+ const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));
// The SGPR spill/restore instructions only work on numbered SGPRs, so we need
// to make sure we are using the correct register class.
- if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
+ if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
}
@@ -607,7 +608,7 @@
assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
- unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
+ unsigned Opcode = getVGPRSpillSaveOpcode(SpillSize);
MFI->setHasSpilledVGPRs();
BuildMI(MBB, MI, DL, get(Opcode))
.addReg(SrcReg, getKillRegState(isKill)) // data
@@ -665,6 +666,7 @@
DebugLoc DL = MBB.findDebugLoc(MI);
unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
unsigned Size = FrameInfo.getObjectSize(FrameIndex);
+ unsigned SpillSize = TRI->getSpillSize(*RC);
MachinePointerInfo PtrInfo
= MachinePointerInfo::getFixedStack(*MF, FrameIndex);
@@ -675,8 +677,8 @@
if (RI.isSGPRClass(RC)) {
// FIXME: Maybe this should not include a memoperand because it will be
// lowered to non-memory instructions.
- const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
- if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
+ const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
+ if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
MachineRegisterInfo &MRI = MF->getRegInfo();
MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
}
@@ -706,7 +708,7 @@
assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");
- unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
+ unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize);
BuildMI(MBB, MI, DL, get(Opcode), DestReg)
.addFrameIndex(FrameIndex) // vaddr
.addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
@@ -1445,9 +1447,9 @@
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
- unsigned DstSize = DstRC->getSize();
+ unsigned DstSize = RI.getRegSizeInBits(*DstRC);
- if (DstSize == 4) {
+ if (DstSize == 32) {
unsigned SelOp = Pred == SCC_TRUE ?
AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32;
@@ -1461,7 +1463,7 @@
return;
}
- if (DstSize == 8 && Pred == SCC_TRUE) {
+ if (DstSize == 64 && Pred == SCC_TRUE) {
MachineInstr *Select =
BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
.addReg(FalseReg)
@@ -1488,7 +1490,7 @@
unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
const int16_t *SubIndices = Sub0_15;
- int NElts = DstSize / 4;
+ int NElts = DstSize / 32;
// 64-bit select is only available for SALU.
if (Pred == SCC_TRUE) {
@@ -2747,7 +2749,7 @@
const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
unsigned DstReg = MRI.createVirtualRegister(SRC);
- unsigned SubRegs = VRC->getSize() / 4;
+ unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
SmallVector<unsigned, 8> SRegs;
for (unsigned i = 0; i < SubRegs; ++i) {
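Note: two unit conversions recur in this file: bits / 8 where a byte size
is needed, and bits / 16 where half the register width in bytes is needed
(as in the DS load case above). Sketch:

  unsigned Bits      = RI.getRegSizeInBits(*RC);
  unsigned Bytes     = Bits / 8;   // full width in bytes
  unsigned HalfBytes = Bits / 16;  // half width in bytes (== Bytes / 2)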
diff --git a/lib/Target/AMDGPU/SIInstrInfo.h b/lib/Target/AMDGPU/SIInstrInfo.h
index 659473c..03a5ef7 100644
--- a/lib/Target/AMDGPU/SIInstrInfo.h
+++ b/lib/Target/AMDGPU/SIInstrInfo.h
@@ -626,13 +626,13 @@
return 4;
}
- return RI.getRegClass(OpInfo.RegClass)->getSize();
+ return RI.getRegSizeInBits(*RI.getRegClass(OpInfo.RegClass)) / 8;
}
/// \brief This form should usually be preferred since it handles operands
/// with unknown register classes.
unsigned getOpSize(const MachineInstr &MI, unsigned OpNo) const {
- return getOpRegClass(MI, OpNo)->getSize();
+ return RI.getRegSizeInBits(*getOpRegClass(MI, OpNo)) / 8;
}
/// \returns true if it is legal for the operand at index \p OpNo
diff --git a/lib/Target/AMDGPU/SIRegisterInfo.cpp b/lib/Target/AMDGPU/SIRegisterInfo.cpp
index 098c672..f2fdf96 100644
--- a/lib/Target/AMDGPU/SIRegisterInfo.cpp
+++ b/lib/Target/AMDGPU/SIRegisterInfo.cpp
@@ -615,7 +615,8 @@
if (SpillToSMEM && isSGPRClass(RC)) {
// XXX - if private_element_size is larger than 4 it might be useful to be
// able to spill wider vmem spills.
- std::tie(EltSize, ScalarStoreOp) = getSpillEltSize(RC->getSize(), true);
+ std::tie(EltSize, ScalarStoreOp) =
+ getSpillEltSize(getRegSizeInBits(*RC) / 8, true);
}
ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
@@ -775,7 +776,8 @@
if (SpillToSMEM && isSGPRClass(RC)) {
// XXX - if private_element_size is larger than 4 it might be useful to be
// able to spill wider vmem spills.
- std::tie(EltSize, ScalarLoadOp) = getSpillEltSize(RC->getSize(), false);
+ std::tie(EltSize, ScalarLoadOp) =
+ getSpillEltSize(getRegSizeInBits(*RC) / 8, false);
}
ArrayRef<int16_t> SplitParts = getRegSplitParts(RC, EltSize);
@@ -1038,20 +1040,21 @@
// TODO: It might be helpful to have some target specific flags in
// TargetRegisterClass to mark which classes are VGPRs to make this trivial.
bool SIRegisterInfo::hasVGPRs(const TargetRegisterClass *RC) const {
- switch (RC->getSize()) {
- case 0: return false;
- case 1: return false;
- case 4:
- return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
- case 8:
- return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
- case 12:
- return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
- case 16:
- return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
+ unsigned Size = getRegSizeInBits(*RC);
+ if (Size < 32)
+ return false;
+ switch (Size) {
case 32:
- return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
+ return getCommonSubClass(&AMDGPU::VGPR_32RegClass, RC) != nullptr;
case 64:
+ return getCommonSubClass(&AMDGPU::VReg_64RegClass, RC) != nullptr;
+ case 96:
+ return getCommonSubClass(&AMDGPU::VReg_96RegClass, RC) != nullptr;
+ case 128:
+ return getCommonSubClass(&AMDGPU::VReg_128RegClass, RC) != nullptr;
+ case 256:
+ return getCommonSubClass(&AMDGPU::VReg_256RegClass, RC) != nullptr;
+ case 512:
return getCommonSubClass(&AMDGPU::VReg_512RegClass, RC) != nullptr;
default:
llvm_unreachable("Invalid register class size");
@@ -1060,18 +1063,18 @@
const TargetRegisterClass *SIRegisterInfo::getEquivalentVGPRClass(
const TargetRegisterClass *SRC) const {
- switch (SRC->getSize()) {
- case 4:
- return &AMDGPU::VGPR_32RegClass;
- case 8:
- return &AMDGPU::VReg_64RegClass;
- case 12:
- return &AMDGPU::VReg_96RegClass;
- case 16:
- return &AMDGPU::VReg_128RegClass;
+ switch (getRegSizeInBits(*SRC)) {
case 32:
- return &AMDGPU::VReg_256RegClass;
+ return &AMDGPU::VGPR_32RegClass;
case 64:
+ return &AMDGPU::VReg_64RegClass;
+ case 96:
+ return &AMDGPU::VReg_96RegClass;
+ case 128:
+ return &AMDGPU::VReg_128RegClass;
+ case 256:
+ return &AMDGPU::VReg_256RegClass;
+ case 512:
return &AMDGPU::VReg_512RegClass;
default:
llvm_unreachable("Invalid register class size");
@@ -1080,16 +1083,16 @@
const TargetRegisterClass *SIRegisterInfo::getEquivalentSGPRClass(
const TargetRegisterClass *VRC) const {
- switch (VRC->getSize()) {
- case 4:
- return &AMDGPU::SGPR_32RegClass;
- case 8:
- return &AMDGPU::SReg_64RegClass;
- case 16:
- return &AMDGPU::SReg_128RegClass;
+ switch (getRegSizeInBits(*VRC)) {
case 32:
- return &AMDGPU::SReg_256RegClass;
+ return &AMDGPU::SGPR_32RegClass;
case 64:
+ return &AMDGPU::SReg_64RegClass;
+ case 128:
+ return &AMDGPU::SReg_128RegClass;
+ case 256:
+ return &AMDGPU::SReg_256RegClass;
+ case 512:
return &AMDGPU::SReg_512RegClass;
default:
llvm_unreachable("Invalid register class size");
@@ -1354,15 +1357,15 @@
const TargetRegisterClass *DstRC,
unsigned DstSubReg,
const TargetRegisterClass *NewRC) const {
- unsigned SrcSize = SrcRC->getSize();
- unsigned DstSize = DstRC->getSize();
- unsigned NewSize = NewRC->getSize();
+ unsigned SrcSize = getRegSizeInBits(*SrcRC);
+ unsigned DstSize = getRegSizeInBits(*DstRC);
+ unsigned NewSize = getRegSizeInBits(*NewRC);
// Do not increase size of registers beyond dword, we would need to allocate
// adjacent registers and constrain regalloc more than needed.
// Always allow dword coalescing.
- if (SrcSize <= 4 || DstSize <= 4)
+ if (SrcSize <= 32 || DstSize <= 32)
return true;
return NewSize <= DstSize || NewSize <= SrcSize;
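Note: the switch keys in this file translate bytes to bits one-for-one:
4 -> 32, 8 -> 64, 12 -> 96, 16 -> 128, 32 -> 256, 64 -> 512. An
illustrative compile-time check (not part of the patch):

  static_assert(4 * 8 == 32 && 12 * 8 == 96 && 64 * 8 == 512,
                "byte-keyed cases rewritten as bit-keyed cases");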
diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp
index 4f5711c..5c9d589 100644
--- a/lib/Target/ARM/ARMBaseInstrInfo.cpp
+++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp
@@ -905,7 +905,7 @@
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
MFI.getObjectSize(FI), Align);
- switch (RC->getSize()) {
+ switch (TRI->getSpillSize(*RC)) {
case 4:
if (ARM::GPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(ARM::STRi12))
@@ -1103,7 +1103,7 @@
MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
MFI.getObjectSize(FI), Align);
- switch (RC->getSize()) {
+ switch (TRI->getSpillSize(*RC)) {
case 4:
if (ARM::GPRRegClass.hasSubClassEq(RC)) {
BuildMI(MBB, I, DL, get(ARM::LDRi12), DestReg)
diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
index 70a44ea..a208875 100644
--- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp
+++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp
@@ -806,7 +806,8 @@
if (!DstSubReg)
return true;
// Small registers don't frequently cause a problem, so we can coalesce them.
- if (NewRC->getSize() < 32 && DstRC->getSize() < 32 && SrcRC->getSize() < 32)
+ if (getRegSizeInBits(*NewRC) < 256 && getRegSizeInBits(*DstRC) < 256 &&
+ getRegSizeInBits(*SrcRC) < 256)
return true;
auto NewRCWeight =
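Note: the coalescing threshold here was 32 bytes, hence 256 bits after
the conversion. An illustrative guard against silently rescaling the
heuristic (not part of the patch):

  static_assert(32 * 8 == 256, "32-byte coalescing threshold, in bits");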
diff --git a/lib/Target/ARM/ARMFrameLowering.cpp b/lib/Target/ARM/ARMFrameLowering.cpp
index 70dbe1b..4f7a0ab 100644
--- a/lib/Target/ARM/ARMFrameLowering.cpp
+++ b/lib/Target/ARM/ARMFrameLowering.cpp
@@ -1960,10 +1960,10 @@
// note: Thumb1 functions spill to R12, not the stack. Reserve a slot
// closest to SP or frame pointer.
assert(RS && "Register scavenging not provided");
- const TargetRegisterClass *RC = &ARM::GPRRegClass;
- RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
+ const TargetRegisterClass &RC = ARM::GPRRegClass;
+ unsigned Size = TRI->getSpillSize(RC);
+ unsigned Align = TRI->getSpillAlignment(RC);
+ RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}
}
}
diff --git a/lib/Target/AVR/AVRAsmPrinter.cpp b/lib/Target/AVR/AVRAsmPrinter.cpp
index 50bb50b..d6491ce 100644
--- a/lib/Target/AVR/AVRAsmPrinter.cpp
+++ b/lib/Target/AVR/AVRAsmPrinter.cpp
@@ -112,7 +112,8 @@
const AVRSubtarget &STI = MF->getSubtarget<AVRSubtarget>();
const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
- unsigned BytesPerReg = TRI.getMinimalPhysRegClass(Reg)->getSize();
+ const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(Reg);
+ unsigned BytesPerReg = TRI.getRegSizeInBits(*RC) / 8;
assert(BytesPerReg <= 2 && "Only 8 and 16 bit regs are supported.");
unsigned RegIdx = ByteNumber / BytesPerReg;
diff --git a/lib/Target/AVR/AVRFrameLowering.cpp b/lib/Target/AVR/AVRFrameLowering.cpp
index b8cb221..ab42a7a 100644
--- a/lib/Target/AVR/AVRFrameLowering.cpp
+++ b/lib/Target/AVR/AVRFrameLowering.cpp
@@ -239,7 +239,7 @@
unsigned Reg = CSI[i - 1].getReg();
bool IsNotLiveIn = !MBB.isLiveIn(Reg);
- assert(TRI->getMinimalPhysRegClass(Reg)->getSize() == 1 &&
+ assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
"Invalid register size");
// Add the callee-saved register as live-in only if it is not already a
@@ -277,7 +277,7 @@
for (const CalleeSavedInfo &CCSI : CSI) {
unsigned Reg = CCSI.getReg();
- assert(TRI->getMinimalPhysRegClass(Reg)->getSize() == 1 &&
+ assert(TRI->getRegSizeInBits(*TRI->getMinimalPhysRegClass(Reg)) == 8 &&
"Invalid register size");
BuildMI(MBB, MI, DL, TII.get(AVR::POPRd), Reg);
diff --git a/lib/Target/Hexagon/BitTracker.cpp b/lib/Target/Hexagon/BitTracker.cpp
index cb3049b..07767d1 100644
--- a/lib/Target/Hexagon/BitTracker.cpp
+++ b/lib/Target/Hexagon/BitTracker.cpp
@@ -347,7 +347,7 @@
unsigned PhysS = (RR.Sub == 0) ? PhysR : TRI.getSubReg(PhysR, RR.Sub);
const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(PhysS);
- uint16_t BW = RC->getSize()*8;
+ uint16_t BW = TRI.getRegSizeInBits(*RC);
return BW;
}
diff --git a/lib/Target/Hexagon/HexagonAsmPrinter.cpp b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
index fda23f8..c8483f7 100644
--- a/lib/Target/Hexagon/HexagonAsmPrinter.cpp
+++ b/lib/Target/Hexagon/HexagonAsmPrinter.cpp
@@ -286,9 +286,9 @@
const MCRegisterInfo *RI = OutStreamer->getContext().getRegisterInfo();
const MachineFunction &MF = *MI.getParent()->getParent();
const auto &HST = MF.getSubtarget<HexagonSubtarget>();
- unsigned VectorSize = HST.useHVXSglOps()
- ? Hexagon::VectorRegsRegClass.getSize()
- : Hexagon::VectorRegs128BRegClass.getSize();
+ const auto &VecRC = HST.useHVXSglOps() ? Hexagon::VectorRegsRegClass
+ : Hexagon::VectorRegs128BRegClass;
+ unsigned VectorSize = HST.getRegisterInfo()->getSpillSize(VecRC);
switch (Inst.getOpcode()) {
default: return;
diff --git a/lib/Target/Hexagon/HexagonBitSimplify.cpp b/lib/Target/Hexagon/HexagonBitSimplify.cpp
index 61f290c..079b7d4 100644
--- a/lib/Target/Hexagon/HexagonBitSimplify.cpp
+++ b/lib/Target/Hexagon/HexagonBitSimplify.cpp
@@ -407,7 +407,7 @@
const TargetRegisterClass *RC = MRI.getRegClass(RR.Reg);
if (RR.Sub == 0) {
Begin = 0;
- Width = RC->getSize()*8;
+ Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC);
return true;
}
@@ -417,7 +417,7 @@
case Hexagon::DoubleRegsRegClassID:
case Hexagon::VecDblRegsRegClassID:
case Hexagon::VecDblRegs128BRegClassID:
- Width = RC->getSize()*8 / 2;
+ Width = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 2;
if (RR.Sub == Hexagon::isub_hi || RR.Sub == Hexagon::vsub_hi)
Begin = Width;
break;
@@ -1054,8 +1054,8 @@
class RedundantInstrElimination : public Transformation {
public:
RedundantInstrElimination(BitTracker &bt, const HexagonInstrInfo &hii,
- MachineRegisterInfo &mri)
- : Transformation(true), HII(hii), MRI(mri), BT(bt) {}
+ const HexagonRegisterInfo &hri, MachineRegisterInfo &mri)
+ : Transformation(true), HII(hii), HRI(hri), MRI(mri), BT(bt) {}
bool processBlock(MachineBasicBlock &B, const RegisterSet &AVs) override;
@@ -1070,6 +1070,7 @@
bool usedBitsEqual(BitTracker::RegisterRef RD, BitTracker::RegisterRef RS);
const HexagonInstrInfo &HII;
+ const HexagonRegisterInfo &HRI;
MachineRegisterInfo &MRI;
BitTracker &BT;
};
@@ -1262,7 +1263,7 @@
assert(MI.getOperand(OpN).isReg());
BitTracker::RegisterRef RR = MI.getOperand(OpN);
const TargetRegisterClass *RC = HBS::getFinalVRegClass(RR, MRI);
- uint16_t Width = RC->getSize()*8;
+ uint16_t Width = HRI.getRegSizeInBits(*RC);
if (!GotBits)
T.set(Begin, Begin+Width);
@@ -2651,7 +2652,7 @@
Changed |= visitBlock(Entry, ImmG, AIG);
RegisterSet ARE; // Available registers for RIE.
- RedundantInstrElimination RIE(BT, HII, MRI);
+ RedundantInstrElimination RIE(BT, HII, HRI, MRI);
bool Ried = visitBlock(Entry, RIE, ARE);
if (Ried) {
Changed = true;
diff --git a/lib/Target/Hexagon/HexagonExpandCondsets.cpp b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
index d8ba5dc..9f8c9de 100644
--- a/lib/Target/Hexagon/HexagonExpandCondsets.cpp
+++ b/lib/Target/Hexagon/HexagonExpandCondsets.cpp
@@ -559,10 +559,10 @@
}
unsigned PhysS = (RS.Sub == 0) ? PhysR : TRI->getSubReg(PhysR, RS.Sub);
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(PhysS);
- switch (RC->getSize()) {
- case 4:
+ switch (TRI->getRegSizeInBits(*RC)) {
+ case 32:
return IfTrue ? A2_tfrt : A2_tfrf;
- case 8:
+ case 64:
return IfTrue ? A2_tfrpt : A2_tfrpf;
}
llvm_unreachable("Invalid register operand");
diff --git a/lib/Target/Hexagon/HexagonFrameLowering.cpp b/lib/Target/Hexagon/HexagonFrameLowering.cpp
index 0e2380f..a04aca4 100644
--- a/lib/Target/Hexagon/HexagonFrameLowering.cpp
+++ b/lib/Target/Hexagon/HexagonFrameLowering.cpp
@@ -1425,7 +1425,7 @@
if (!SRegs[S->Reg])
continue;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(S->Reg);
- int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), S->Offset);
+ int FI = MFI.CreateFixedSpillStackObject(TRI->getSpillSize(*RC), S->Offset);
MinOffset = std::min(MinOffset, S->Offset);
CSI.push_back(CalleeSavedInfo(S->Reg, FI));
SRegs[S->Reg] = false;
@@ -1437,11 +1437,12 @@
for (int x = SRegs.find_first(); x >= 0; x = SRegs.find_next(x)) {
unsigned R = x;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(R);
- int Off = MinOffset - RC->getSize();
- unsigned Align = std::min(RC->getAlignment(), getStackAlignment());
+ unsigned Size = TRI->getSpillSize(*RC);
+ int Off = MinOffset - Size;
+ unsigned Align = std::min(TRI->getSpillAlignment(*RC), getStackAlignment());
assert(isPowerOf2_32(Align));
Off &= -Align;
- int FI = MFI.CreateFixedSpillStackObject(RC->getSize(), Off);
+ int FI = MFI.CreateFixedSpillStackObject(Size, Off);
MinOffset = std::min(MinOffset, Off);
CSI.push_back(CalleeSavedInfo(R, FI));
SRegs[R] = false;
@@ -1677,10 +1678,10 @@
int FI = MI->getOperand(0).getIndex();
bool Is128B = HST.useHVXDblOps();
- auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
- : &Hexagon::VectorRegs128BRegClass;
- unsigned Size = RC->getSize();
- unsigned NeedAlign = RC->getAlignment();
+ const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
+ : Hexagon::VectorRegs128BRegClass;
+ unsigned Size = HRI.getSpillSize(RC);
+ unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned StoreOpc;
@@ -1734,10 +1735,10 @@
int FI = MI->getOperand(1).getIndex();
bool Is128B = HST.useHVXDblOps();
- auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
- : &Hexagon::VectorRegs128BRegClass;
- unsigned Size = RC->getSize();
- unsigned NeedAlign = RC->getAlignment();
+ const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
+ : Hexagon::VectorRegs128BRegClass;
+ unsigned Size = HRI.getSpillSize(RC);
+ unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned LoadOpc;
@@ -1777,16 +1778,16 @@
if (!MI->getOperand(0).isFI())
return false;
+ auto &HRI = *HST.getRegisterInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned SrcR = MI->getOperand(2).getReg();
bool IsKill = MI->getOperand(2).isKill();
int FI = MI->getOperand(0).getIndex();
bool Is128B = HST.useHVXDblOps();
- auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
- : &Hexagon::VectorRegs128BRegClass;
-
- unsigned NeedAlign = RC->getAlignment();
+ const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
+ : Hexagon::VectorRegs128BRegClass;
+ unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned StoreOpc;
@@ -1815,15 +1816,15 @@
if (!MI->getOperand(1).isFI())
return false;
+ auto &HRI = *HST.getRegisterInfo();
DebugLoc DL = MI->getDebugLoc();
unsigned DstR = MI->getOperand(0).getReg();
int FI = MI->getOperand(1).getIndex();
bool Is128B = HST.useHVXDblOps();
- auto *RC = !Is128B ? &Hexagon::VectorRegsRegClass
- : &Hexagon::VectorRegs128BRegClass;
-
- unsigned NeedAlign = RC->getAlignment();
+ const auto &RC = !Is128B ? Hexagon::VectorRegsRegClass
+ : Hexagon::VectorRegs128BRegClass;
+ unsigned NeedAlign = HRI.getSpillAlignment(RC);
unsigned HasAlign = MFI.getObjectAlignment(FI);
unsigned LoadOpc;
@@ -1932,7 +1933,7 @@
if (!needToReserveScavengingSpillSlots(MF, HRI, RC))
continue;
unsigned Num = RC == &Hexagon::IntRegsRegClass ? NumberScavengerSlots : 1;
- unsigned S = RC->getSize(), A = RC->getAlignment();
+ unsigned S = HRI.getSpillSize(*RC), A = HRI.getSpillAlignment(*RC);
for (unsigned i = 0; i < Num; i++) {
int NewFI = MFI.CreateSpillStackObject(S, A);
RS->addScavengingFrameIndex(NewFI);
diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp
index 2a9d962..134f7ac 100644
--- a/lib/Target/Mips/MipsAsmPrinter.cpp
+++ b/lib/Target/Mips/MipsAsmPrinter.cpp
@@ -273,9 +273,9 @@
const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
// size of stack area to which FP callee-saved regs are saved.
- unsigned CPURegSize = Mips::GPR32RegClass.getSize();
- unsigned FGR32RegSize = Mips::FGR32RegClass.getSize();
- unsigned AFGR64RegSize = Mips::AFGR64RegClass.getSize();
+ unsigned CPURegSize = TRI->getRegSizeInBits(Mips::GPR32RegClass) / 8;
+ unsigned FGR32RegSize = TRI->getRegSizeInBits(Mips::FGR32RegClass) / 8;
+ unsigned AFGR64RegSize = TRI->getRegSizeInBits(Mips::AFGR64RegClass) / 8;
bool HasAFGR64Reg = false;
unsigned CSFPRegsSize = 0;
diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp
index b2cf039..ef05166 100644
--- a/lib/Target/Mips/MipsFrameLowering.cpp
+++ b/lib/Target/Mips/MipsFrameLowering.cpp
@@ -119,7 +119,7 @@
// Conservatively assume all callee-saved registers will be saved.
for (const MCPhysReg *R = TRI.getCalleeSavedRegs(&MF); *R; ++R) {
- unsigned Size = TRI.getMinimalPhysRegClass(*R)->getSize();
+ unsigned Size = TRI.getSpillSize(*TRI.getMinimalPhysRegClass(*R));
Offset = alignTo(Offset + Size, Size);
}
diff --git a/lib/Target/Mips/MipsMachineFunction.cpp b/lib/Target/Mips/MipsMachineFunction.cpp
index 5bf4c95..ddd3564 100644
--- a/lib/Target/Mips/MipsMachineFunction.cpp
+++ b/lib/Target/Mips/MipsMachineFunction.cpp
@@ -53,14 +53,15 @@
}
void MipsFunctionInfo::createEhDataRegsFI() {
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
for (int I = 0; I < 4; ++I) {
- const TargetRegisterClass *RC =
+ const TargetRegisterClass &RC =
static_cast<const MipsTargetMachine &>(MF.getTarget()).getABI().IsN64()
- ? &Mips::GPR64RegClass
- : &Mips::GPR32RegClass;
+ ? Mips::GPR64RegClass
+ : Mips::GPR32RegClass;
- EhDataRegFI[I] = MF.getFrameInfo().CreateStackObject(RC->getSize(),
- RC->getAlignment(), false);
+ EhDataRegFI[I] = MF.getFrameInfo().CreateStackObject(TRI.getSpillSize(RC),
+ TRI.getSpillAlignment(RC), false);
}
}
@@ -69,11 +70,12 @@
// The current implementation only supports Mips32r2+ not Mips64rX. Status
// is always 32 bits, ErrorPC is 32 or 64 bits dependent on architecture,
// however Mips32r2+ is the supported architecture.
- const TargetRegisterClass *RC = &Mips::GPR32RegClass;
+ const TargetRegisterClass &RC = Mips::GPR32RegClass;
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
for (int I = 0; I < 2; ++I)
ISRDataRegFI[I] = MF.getFrameInfo().CreateStackObject(
- RC->getSize(), RC->getAlignment(), false);
+ TRI.getSpillSize(RC), TRI.getSpillAlignment(RC), false);
}
bool MipsFunctionInfo::isEhDataRegFI(int FI) const {
@@ -93,9 +95,10 @@
}
int MipsFunctionInfo::getMoveF64ViaSpillFI(const TargetRegisterClass *RC) {
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
if (MoveF64ViaSpillFI == -1) {
MoveF64ViaSpillFI = MF.getFrameInfo().CreateStackObject(
- RC->getSize(), RC->getAlignment(), false);
+ TRI.getSpillSize(*RC), TRI.getSpillAlignment(*RC), false);
}
return MoveF64ViaSpillFI;
}
diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp
index ef8d18c..e765b462 100644
--- a/lib/Target/Mips/MipsSEFrameLowering.cpp
+++ b/lib/Target/Mips/MipsSEFrameLowering.cpp
@@ -260,7 +260,8 @@
// copy dst_hi, $vr1
unsigned Dst = I->getOperand(0).getReg(), Src = I->getOperand(1).getReg();
- unsigned VRegSize = RegInfo.getMinimalPhysRegClass(Dst)->getSize() / 2;
+ const TargetRegisterClass *DstRC = RegInfo.getMinimalPhysRegClass(Dst);
+ unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;
const TargetRegisterClass *RC = RegInfo.intRegClass(VRegSize);
unsigned VR0 = MRI.createVirtualRegister(RC);
unsigned VR1 = MRI.createVirtualRegister(RC);
@@ -858,6 +859,7 @@
BitVector &SavedRegs,
RegScavenger *RS) const {
TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
MipsABIInfo ABI = STI.getABI();
unsigned FP = ABI.GetFramePtr();
@@ -883,10 +885,11 @@
if (ExpandPseudo(MF).expand()) {
// The spill slot should be half the size of the accumulator. If target is
// mips64, it should be 64-bit, otherwise it should be 32-bit.
- const TargetRegisterClass *RC = STI.hasMips64() ?
- &Mips::GPR64RegClass : &Mips::GPR32RegClass;
- int FI = MF.getFrameInfo().CreateStackObject(RC->getSize(),
- RC->getAlignment(), false);
+ const TargetRegisterClass &RC = STI.hasMips64() ?
+ Mips::GPR64RegClass : Mips::GPR32RegClass;
+ int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
+ TRI->getSpillAlignment(RC),
+ false);
RS->addScavengingFrameIndex(FI);
}
@@ -897,10 +900,11 @@
if (isInt<16>(MaxSPOffset))
return;
- const TargetRegisterClass *RC =
- ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
- int FI = MF.getFrameInfo().CreateStackObject(RC->getSize(),
- RC->getAlignment(), false);
+ const TargetRegisterClass &RC =
+ ABI.ArePtrs64bit() ? Mips::GPR64RegClass : Mips::GPR32RegClass;
+ int FI = MF.getFrameInfo().CreateStackObject(TRI->getSpillSize(RC),
+ TRI->getSpillAlignment(RC),
+ false);
RS->addScavengingFrameIndex(FI);
}
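Note: VRegSize is half the width of Dst in bytes, so the old
getSize() / 2 becomes getRegSizeInBits() / 16:

  // bits/16 == (bits/8)/2 == bytes/2, i.e. half the register width in bytes.
  unsigned VRegSize = RegInfo.getRegSizeInBits(*DstRC) / 16;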
diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp
index 91e712a..6ce3f88 100644
--- a/lib/Target/Mips/MipsSEInstrInfo.cpp
+++ b/lib/Target/Mips/MipsSEInstrInfo.cpp
@@ -567,8 +567,8 @@
const MCInstrDesc &Desc = get(Opc);
assert(Desc.NumOperands == 2 && "Unary instruction expected.");
const MipsRegisterInfo *RI = &getRegisterInfo();
- unsigned DstRegSize = getRegClass(Desc, 0, RI, MF)->getSize();
- unsigned SrcRegSize = getRegClass(Desc, 1, RI, MF)->getSize();
+ unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0, RI, MF));
+ unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1, RI, MF));
return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
}
diff --git a/lib/Target/NVPTX/NVPTXInstrInfo.cpp b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
index 3026f0b..0f6c2e5 100644
--- a/lib/Target/NVPTX/NVPTXInstrInfo.cpp
+++ b/lib/Target/NVPTX/NVPTXInstrInfo.cpp
@@ -38,7 +38,7 @@
const TargetRegisterClass *DestRC = MRI.getRegClass(DestReg);
const TargetRegisterClass *SrcRC = MRI.getRegClass(SrcReg);
- if (DestRC->getSize() != SrcRC->getSize())
+ if (RegInfo.getRegSizeInBits(*DestRC) != RegInfo.getRegSizeInBits(*SrcRC))
report_fatal_error("Copy one register into another with a different width");
unsigned Op;
diff --git a/lib/Target/PowerPC/PPCFrameLowering.cpp b/lib/Target/PowerPC/PPCFrameLowering.cpp
index 4c9430a..2a402de 100644
--- a/lib/Target/PowerPC/PPCFrameLowering.cpp
+++ b/lib/Target/PowerPC/PPCFrameLowering.cpp
@@ -1898,12 +1898,13 @@
MachineFrameInfo &MFI = MF.getFrameInfo();
if (MFI.hasVarSizedObjects() || spillsCR(MF) || spillsVRSAVE(MF) ||
hasNonRISpills(MF) || (hasSpills(MF) && !isInt<16>(StackSize))) {
- const TargetRegisterClass *GPRC = &PPC::GPRCRegClass;
- const TargetRegisterClass *G8RC = &PPC::G8RCRegClass;
- const TargetRegisterClass *RC = Subtarget.isPPC64() ? G8RC : GPRC;
- RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
+ const TargetRegisterClass &GPRC = PPC::GPRCRegClass;
+ const TargetRegisterClass &G8RC = PPC::G8RCRegClass;
+ const TargetRegisterClass &RC = Subtarget.isPPC64() ? G8RC : GPRC;
+ const TargetRegisterInfo &TRI = *Subtarget.getRegisterInfo();
+ unsigned Size = TRI.getSpillSize(RC);
+ unsigned Align = TRI.getSpillAlignment(RC);
+ RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
// Might we have over-aligned allocas?
bool HasAlVars = MFI.hasVarSizedObjects() &&
@@ -1911,9 +1912,7 @@
// These kinds of spills might need two registers.
if (spillsCR(MF) || spillsVRSAVE(MF) || HasAlVars)
- RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
+ RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}
}
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 58c4b14..fee008b 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -1115,10 +1115,9 @@
return nullptr;
unsigned OpNum = Ops[0];
- assert(Size ==
- MF.getRegInfo()
- .getRegClass(MI.getOperand(OpNum).getReg())
- ->getSize() &&
+ assert(Size * 8 ==
+ TRI->getRegSizeInBits(*MF.getRegInfo()
+ .getRegClass(MI.getOperand(OpNum).getReg())) &&
"Invalid size combination");
if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
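Note: Size is a byte count here, so the assert scales it by 8 to compare
against the bit-based query. Sketch of the equivalence:

  // Size * 8 == getRegSizeInBits(RC)  <=>  Size == register width in bytes.
  assert(Size * 8 == TRI->getRegSizeInBits(*RC) && "Invalid size combination");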
diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp
index 036f5d2..b847781 100644
--- a/lib/Target/X86/X86FastISel.cpp
+++ b/lib/Target/X86/X86FastISel.cpp
@@ -2149,7 +2149,8 @@
if (!LHSReg || !RHSReg)
return false;
- unsigned Opc = X86::getCMovFromCond(CC, RC->getSize());
+ const TargetRegisterInfo &TRI = *Subtarget->getRegisterInfo();
+ unsigned Opc = X86::getCMovFromCond(CC, TRI.getRegSizeInBits(*RC)/8);
unsigned ResultReg = fastEmitInst_rr(Opc, RC, RHSReg, RHSIsKill,
LHSReg, LHSIsKill);
updateValueMap(I, ResultReg);
diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp
index 6dddb10..a94045c 100644
--- a/lib/Target/X86/X86FrameLowering.cpp
+++ b/lib/Target/X86/X86FrameLowering.cpp
@@ -1924,14 +1924,15 @@
continue;
const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
+ unsigned Size = TRI->getSpillSize(*RC);
+ unsigned Align = TRI->getSpillAlignment(*RC);
// ensure alignment
- SpillSlotOffset -= std::abs(SpillSlotOffset) % RC->getAlignment();
+ SpillSlotOffset -= std::abs(SpillSlotOffset) % Align;
// spill into slot
- SpillSlotOffset -= RC->getSize();
- int SlotIndex =
- MFI.CreateFixedSpillStackObject(RC->getSize(), SpillSlotOffset);
+ SpillSlotOffset -= Size;
+ int SlotIndex = MFI.CreateFixedSpillStackObject(Size, SpillSlotOffset);
CSI[i - 1].setFrameIdx(SlotIndex);
- MFI.ensureMaxAlignment(RC->getAlignment());
+ MFI.ensureMaxAlignment(Align);
}
return true;
diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 7e192af..5e0bd10 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6284,9 +6284,11 @@
ArrayRef<MachineOperand> Cond, unsigned TrueReg,
unsigned FalseReg) const {
MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
+ const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
+ const TargetRegisterClass &RC = *MRI.getRegClass(DstReg);
assert(Cond.size() == 1 && "Invalid Cond array");
unsigned Opc = getCMovFromCond((X86::CondCode)Cond[0].getImm(),
- MRI.getRegClass(DstReg)->getSize(),
+ TRI.getRegSizeInBits(RC) / 8,
false /*HasMemoryOperand*/);
BuildMI(MBB, I, DL, get(Opc), DstReg).addReg(FalseReg).addReg(TrueReg);
}
@@ -6557,7 +6559,7 @@
bool HasAVX512 = STI.hasAVX512();
bool HasVLX = STI.hasVLX();
- switch (RC->getSize()) {
+ switch (STI.getRegisterInfo()->getSpillSize(*RC)) {
default:
llvm_unreachable("Unknown spill size");
case 1:
@@ -6717,9 +6719,9 @@
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
- assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= RC->getSize() &&
+ assert(MF.getFrameInfo().getObjectSize(FrameIdx) >= TRI->getSpillSize(*RC) &&
"Stack slot too small for store");
- unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
+ unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
@@ -6736,7 +6738,8 @@
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, Subtarget);
@@ -6756,7 +6759,7 @@
const TargetRegisterClass *RC,
const TargetRegisterInfo *TRI) const {
const MachineFunction &MF = *MBB.getParent();
- unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
+ unsigned Alignment = std::max<uint32_t>(TRI->getSpillSize(*RC), 16);
bool isAligned =
(Subtarget.getFrameLowering()->getStackAlignment() >= Alignment) ||
RI.canRealignStack(MF);
@@ -6771,7 +6774,8 @@
MachineInstr::mmo_iterator MMOBegin,
MachineInstr::mmo_iterator MMOEnd,
SmallVectorImpl<MachineInstr*> &NewMIs) const {
- unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = MMOBegin != MMOEnd &&
(*MMOBegin)->getAlignment() >= Alignment;
unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, Subtarget);
@@ -7230,7 +7234,8 @@
NewOpc = getSETFromCond(NewCC, HasMemoryOperand);
else {
unsigned DstReg = Instr.getOperand(0).getReg();
- NewOpc = getCMovFromCond(NewCC, MRI->getRegClass(DstReg)->getSize(),
+ const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
+ NewOpc = getCMovFromCond(NewCC, TRI->getRegSizeInBits(*DstRC)/8,
HasMemoryOperand);
}
@@ -7758,7 +7763,9 @@
unsigned DstIdx = (Imm >> 4) & 3;
unsigned SrcIdx = (Imm >> 6) & 3;
- unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
+ unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size <= RCSize && 4 <= Align) {
int PtrOffset = SrcIdx * 4;
unsigned NewImm = (DstIdx << 4) | ZMask;
@@ -7780,7 +7787,9 @@
// To fold the load, adjust the pointer to the upper and use (V)MOVLPS.
// TODO: In most cases AVX doesn't have an 8-byte alignment requirement.
if (OpNum == 2) {
- unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum, &RI, MF);
+ unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size <= RCSize && 8 <= Align) {
unsigned NewOpCode =
(MI.getOpcode() == X86::VMOVHLPSZrr) ? X86::VMOVLPSZ128rm :
@@ -7869,7 +7878,10 @@
return nullptr;
bool NarrowToMOV32rm = false;
if (Size) {
- unsigned RCSize = getRegClass(MI.getDesc(), OpNum, &RI, MF)->getSize();
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ const TargetRegisterClass *RC = getRegClass(MI.getDesc(), OpNum,
+ &RI, MF);
+ unsigned RCSize = TRI.getRegSizeInBits(*RC) / 8;
if (Size < RCSize) {
// Check if it's safe to fold the load. If the size of the object is
// narrower than the load width, then it's not.
@@ -8310,11 +8322,13 @@
const MachineFunction &MF) {
unsigned Opc = LoadMI.getOpcode();
unsigned UserOpc = UserMI.getOpcode();
- unsigned RegSize =
- MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg())->getSize();
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
+ const TargetRegisterClass *RC =
+ MF.getRegInfo().getRegClass(LoadMI.getOperand(0).getReg());
+ unsigned RegSize = TRI.getRegSizeInBits(*RC);
if ((Opc == X86::MOVSSrm || Opc == X86::VMOVSSrm || Opc == X86::VMOVSSZrm) &&
- RegSize > 4) {
+ RegSize > 32) {
// These instructions only load 32 bits, we can't fold them if the
// destination register is wider than 32 bits (4 bytes), and its user
// instruction isn't scalar (SS).
@@ -8365,7 +8379,7 @@
}
if ((Opc == X86::MOVSDrm || Opc == X86::VMOVSDrm || Opc == X86::VMOVSDZrm) &&
- RegSize > 8) {
+ RegSize > 64) {
// These instructions only load 64 bits, we can't fold them if the
// destination register is wider than 64 bits (8 bytes), and its user
// instruction isn't scalar (SD).
@@ -8710,6 +8724,7 @@
bool FoldedStore = I->second.second & TB_FOLDED_STORE;
const MCInstrDesc &MCID = get(Opc);
MachineFunction &MF = DAG.getMachineFunction();
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
const TargetRegisterClass *RC = getRegClass(MCID, Index, &RI, MF);
unsigned NumDefs = MCID.NumDefs;
std::vector<SDValue> AddrOps;
@@ -8744,7 +8759,7 @@
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
- unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
+ unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, Subtarget), dl,
@@ -8789,7 +8804,7 @@
return false;
// FIXME: If a VR128 can have size 32, we should be checking if a 32-byte
// memory access is slow above.
- unsigned Alignment = std::max<uint32_t>(RC->getSize(), 16);
+ unsigned Alignment = std::max<uint32_t>(TRI.getSpillSize(*RC), 16);
bool isAligned = (*MMOs.first) &&
(*MMOs.first)->getAlignment() >= Alignment;
SDNode *Store =
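Note: getCMovFromCond() takes the operand width in bytes, hence the / 8
at each call site; the MOVSS/MOVSD fold thresholds rescale the same way
(4 bytes -> 32 bits, 8 bytes -> 64 bits). Condensed sketch:

  unsigned OpBytes = TRI.getRegSizeInBits(*RC) / 8; // e.g. GR32 -> 32 bits -> 4
  unsigned Opc = X86::getCMovFromCond(CC, OpBytes, /*HasMemoryOperand=*/false);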
diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp
index 258f167..1f16f3c 100644
--- a/lib/Target/X86/X86RegisterInfo.cpp
+++ b/lib/Target/X86/X86RegisterInfo.cpp
@@ -137,25 +137,29 @@
case X86::FR32RegClassID:
case X86::FR64RegClassID:
// If AVX-512 isn't supported we should only inflate to these classes.
- if (!Subtarget.hasAVX512() && Super->getSize() == RC->getSize())
+ if (!Subtarget.hasAVX512() &&
+ getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128RegClassID:
case X86::VR256RegClassID:
// If VLX isn't supported we should only inflate to these classes.
- if (!Subtarget.hasVLX() && Super->getSize() == RC->getSize())
+ if (!Subtarget.hasVLX() &&
+ getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::VR128XRegClassID:
case X86::VR256XRegClassID:
// If VLX isn't supported we shouldn't inflate to these classes.
- if (Subtarget.hasVLX() && Super->getSize() == RC->getSize())
+ if (Subtarget.hasVLX() &&
+ getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::FR32XRegClassID:
case X86::FR64XRegClassID:
// If AVX-512 isn't supported we shouldn't inflate to these classes.
- if (Subtarget.hasAVX512() && Super->getSize() == RC->getSize())
+ if (Subtarget.hasAVX512() &&
+ getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
break;
case X86::GR8RegClassID:
@@ -168,7 +172,7 @@
case X86::VR512RegClassID:
// Don't return a super-class that would shrink the spill size.
// That can happen with the vector and float classes.
- if (Super->getSize() == RC->getSize())
+ if (getRegSizeInBits(*Super) == getRegSizeInBits(*RC))
return Super;
}
Super = *I++;
diff --git a/lib/Target/XCore/XCoreFrameLowering.cpp b/lib/Target/XCore/XCoreFrameLowering.cpp
index a752357..7846120 100644
--- a/lib/Target/XCore/XCoreFrameLowering.cpp
+++ b/lib/Target/XCore/XCoreFrameLowering.cpp
@@ -575,18 +575,17 @@
RegScavenger *RS) const {
assert(RS && "requiresRegisterScavenging failed");
MachineFrameInfo &MFI = MF.getFrameInfo();
- const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ const TargetRegisterClass &RC = XCore::GRRegsRegClass;
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
// Reserve slots close to SP or frame pointer for Scavenging spills.
// When using SP for small frames, we don't need any scratch registers.
// When using SP for large frames, we may need 2 scratch registers.
// When using FP, for large or small frames, we may need 1 scratch register.
+ unsigned Size = TRI.getSpillSize(RC);
+ unsigned Align = TRI.getSpillAlignment(RC);
if (XFI->isLargeFrame(MF) || hasFP(MF))
- RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
+ RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
if (XFI->isLargeFrame(MF) && !hasFP(MF))
- RS->addScavengingFrameIndex(MFI.CreateStackObject(RC->getSize(),
- RC->getAlignment(),
- false));
+ RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
}
diff --git a/lib/Target/XCore/XCoreMachineFunctionInfo.cpp b/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
index e91536c..75af0e9 100644
--- a/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
+++ b/lib/Target/XCore/XCoreMachineFunctionInfo.cpp
@@ -10,6 +10,7 @@
#include "XCoreMachineFunctionInfo.h"
#include "XCoreInstrInfo.h"
#include "llvm/IR/Function.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;
@@ -35,13 +36,15 @@
if (LRSpillSlotSet) {
return LRSpillSlot;
}
- const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ const TargetRegisterClass &RC = XCore::GRRegsRegClass;
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
if (! MF.getFunction()->isVarArg()) {
// A fixed offset of 0 allows us to save / restore LR using entsp / retsp.
- LRSpillSlot = MFI.CreateFixedObject(RC->getSize(), 0, true);
+ LRSpillSlot = MFI.CreateFixedObject(TRI.getSpillSize(RC), 0, true);
} else {
- LRSpillSlot = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
+ LRSpillSlot = MFI.CreateStackObject(TRI.getSpillSize(RC),
+ TRI.getSpillAlignment(RC), true);
}
LRSpillSlotSet = true;
return LRSpillSlot;
@@ -51,9 +54,11 @@
if (FPSpillSlotSet) {
return FPSpillSlot;
}
- const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ const TargetRegisterClass &RC = XCore::GRRegsRegClass;
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
- FPSpillSlot = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
+ FPSpillSlot = MFI.CreateStackObject(TRI.getSpillSize(RC),
+ TRI.getSpillAlignment(RC), true);
FPSpillSlotSet = true;
return FPSpillSlot;
}
@@ -62,10 +67,13 @@
if (EHSpillSlotSet) {
return EHSpillSlot;
}
- const TargetRegisterClass *RC = &XCore::GRRegsRegClass;
+ const TargetRegisterClass &RC = XCore::GRRegsRegClass;
+ const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
MachineFrameInfo &MFI = MF.getFrameInfo();
- EHSpillSlot[0] = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
- EHSpillSlot[1] = MFI.CreateStackObject(RC->getSize(), RC->getAlignment(), true);
+ unsigned Size = TRI.getSpillSize(RC);
+ unsigned Align = TRI.getSpillAlignment(RC);
+ EHSpillSlot[0] = MFI.CreateStackObject(Size, Align, true);
+ EHSpillSlot[1] = MFI.CreateStackObject(Size, Align, true);
EHSpillSlotSet = true;
return EHSpillSlot;
}