236 changes: 127 additions & 109 deletions flang/lib/Optimizer/Transforms/DebugTypeGenerator.cpp
@@ -48,8 +48,7 @@ DebugTypeGenerator::DebugTypeGenerator(mlir::ModuleOp m,
mlir::SymbolTable *symbolTable_,
const mlir::DataLayout &dl)
: module(m), symbolTable(symbolTable_), dataLayout{&dl},
kindMapping(getKindMapping(m)), llvmTypeConverter(m, false, false, dl),
derivedTypeDepth(0) {
kindMapping(getKindMapping(m)), llvmTypeConverter(m, false, false, dl) {
LLVM_DEBUG(llvm::dbgs() << "DITypeAttr generator\n");

mlir::MLIRContext *context = module.getContext();
@@ -272,31 +271,127 @@ DebugTypeGenerator::getFieldSizeAndAlign(mlir::Type fieldTy) {
return std::pair{byteSize, byteAlign};
}

mlir::LLVM::DITypeAttr DerivedTypeCache::lookup(mlir::Type type) {
auto iter = typeCache.find(type);
if (iter != typeCache.end()) {
if (iter->second.first) {
componentActiveRecursionLevels = iter->second.second;
}
return iter->second.first;
}
return nullptr;
}

DerivedTypeCache::ActiveLevels
DerivedTypeCache::startTranslating(mlir::Type type,
mlir::LLVM::DITypeAttr placeHolder) {
derivedTypeDepth++;
if (!placeHolder)
return {};
typeCache[type] = std::pair<mlir::LLVM::DITypeAttr, ActiveLevels>(
placeHolder, {derivedTypeDepth});
return {};
}
Contributor

This function always returns an empty list.

Contributor Author

This is expected: at the starting point of the translation, it is not yet known which recursive references will be met directly or indirectly in the components.

This list will be updated in postComponentVisitUpdate().

I made it the result of startTranslating to enforce that the list is created at the right point.
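
To make the intended call order explicit, here is a minimal, self-contained sketch of how a conversion routine is expected to drive this cache. Cache and convert below are hypothetical stand-ins (plain strings instead of the MLIR types, DerivedTypeCache, and convertRecordType) so the snippet compiles on its own; it is illustrative only, not the actual flang code.

// Simplified model of the caching protocol; not the flang implementation.
#include <string>
#include <vector>

struct Cache {
  using ActiveLevels = std::vector<int>;
  std::string lookup(const std::string &) { return {}; }
  ActiveLevels startTranslating(const std::string &, const std::string &) {
    return {}; // empty here; populated later by postComponentVisitUpdate()
  }
  void preComponentVisitUpdate() {}
  void postComponentVisitUpdate(ActiveLevels &) {}
  void finalize(const std::string &, const std::string &, ActiveLevels &&) {}
};

std::string convert(Cache &cache, const std::string &type,
                    const std::vector<std::string> &components) {
  // 1. Reuse an earlier translation if one is still valid in this context.
  if (std::string cached = cache.lookup(type); !cached.empty())
    return cached;
  // 2. Register a placeholder; the returned recursion list starts out empty.
  Cache::ActiveLevels nested =
      cache.startTranslating(type, "placeholder(" + type + ")");
  std::string result = "struct " + type + " {";
  for (const std::string &component : components) {
    cache.preComponentVisitUpdate();         // reset per-component state
    result += " " + convert(cache, component, {}); // may recurse
    cache.postComponentVisitUpdate(nested);  // merge recursion levels seen
  }
  result += " }";
  // 3. Cache the result if it is safe, trim finished levels, pop the depth.
  cache.finalize(type, result, std::move(nested));
  return result;
}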


void DerivedTypeCache::preComponentVisitUpdate() {
componentActiveRecursionLevels.clear();
}

void DerivedTypeCache::postComponentVisitUpdate(
ActiveLevels &activeRecursionLevels) {
if (componentActiveRecursionLevels.empty())
return;
ActiveLevels oldLevels;
oldLevels.swap(activeRecursionLevels);
std::merge(componentActiveRecursionLevels.begin(),
componentActiveRecursionLevels.end(), oldLevels.begin(),
oldLevels.end(), std::back_inserter(activeRecursionLevels));
}

void DerivedTypeCache::finalize(mlir::Type ty, mlir::LLVM::DITypeAttr attr,
ActiveLevels &&activeRecursionLevels) {
// If there is no nested recursion or if this type does not point to any type
// nodes above it, it is safe to cache it indefinitely (it can be used in any
// context).
if (activeRecursionLevels.empty() ||
(activeRecursionLevels[0] == derivedTypeDepth)) {
typeCache[ty] = std::pair<mlir::LLVM::DITypeAttr, ActiveLevels>(attr, {});
componentActiveRecursionLevels.clear();
cleanUpCache(derivedTypeDepth);
--derivedTypeDepth;
return;
}
// Trim any recursion below the current type.
if (activeRecursionLevels.back() >= derivedTypeDepth) {
auto last = llvm::find_if(activeRecursionLevels, [&](std::int32_t depth) {
return depth >= derivedTypeDepth;
});
if (last != activeRecursionLevels.end()) {
activeRecursionLevels.erase(last, activeRecursionLevels.end());
}
}
componentActiveRecursionLevels = std::move(activeRecursionLevels);
typeCache[ty] = std::pair<mlir::LLVM::DITypeAttr, ActiveLevels>(
attr, componentActiveRecursionLevels);
cleanUpCache(derivedTypeDepth);
if (!componentActiveRecursionLevels.empty())
insertCacheCleanUp(ty, componentActiveRecursionLevels.back());
--derivedTypeDepth;
}

void DerivedTypeCache::insertCacheCleanUp(mlir::Type type, int32_t depth) {
auto iter = llvm::find_if(cacheCleanupList,
[&](const auto &x) { return x.second >= depth; });
if (iter == cacheCleanupList.end()) {
cacheCleanupList.emplace_back(
std::pair<llvm::SmallVector<mlir::Type>, int32_t>({type}, depth));
return;
}
if (iter->second == depth) {
iter->first.push_back(type);
return;
}
cacheCleanupList.insert(
iter, std::pair<llvm::SmallVector<mlir::Type>, int32_t>({type}, depth));
}

void DerivedTypeCache::cleanUpCache(int32_t depth) {
if (cacheCleanupList.empty())
return;
// cleanups are done in the post actions when visiting a derived type
// tree. So if there is a clean-up for the current depth, it has to be
// the last one (deeper ones must have been done already).
if (cacheCleanupList.back().second == depth) {
for (mlir::Type type : cacheCleanupList.back().first)
typeCache[type].first = nullptr;
cacheCleanupList.pop_back_n(1);
}
}

mlir::LLVM::DITypeAttr DebugTypeGenerator::convertRecordType(
fir::RecordType Ty, mlir::LLVM::DIFileAttr fileAttr,
mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp) {
// Check if this type has already been converted.
auto iter = typeCache.find(Ty);
if (iter != typeCache.end())
return iter->second;

bool canCacheThisType = true;
llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
if (mlir::LLVM::DITypeAttr attr = derivedTypeCache.lookup(Ty))
return attr;

mlir::MLIRContext *context = module.getContext();
auto recId = mlir::DistinctAttr::create(mlir::UnitAttr::get(context));
auto [nameKind, sourceName] = fir::NameUniquer::deconstruct(Ty.getName());
if (nameKind != fir::NameUniquer::NameKind::DERIVED_TYPE)
return genPlaceholderType(context);

llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
// Generate a placeholder TypeAttr which will be used if a member
// references the parent type.
auto comAttr = mlir::LLVM::DICompositeTypeAttr::get(
auto recId = mlir::DistinctAttr::create(mlir::UnitAttr::get(context));
auto placeHolder = mlir::LLVM::DICompositeTypeAttr::get(
context, recId, /*isRecSelf=*/true, llvm::dwarf::DW_TAG_structure_type,
mlir::StringAttr::get(context, ""), fileAttr, /*line=*/0, scope,
/*baseType=*/nullptr, mlir::LLVM::DIFlags::Zero, /*sizeInBits=*/0,
/*alignInBits=*/0, elements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
/*allocated=*/nullptr, /*associated=*/nullptr);
typeCache[Ty] = comAttr;

auto result = fir::NameUniquer::deconstruct(Ty.getName());
if (result.first != fir::NameUniquer::NameKind::DERIVED_TYPE)
return genPlaceholderType(context);
DerivedTypeCache::ActiveLevels nestedRecursions =
derivedTypeCache.startTranslating(Ty, placeHolder);

fir::TypeInfoOp tiOp = symbolTable->lookup<fir::TypeInfoOp>(Ty.getName());
unsigned line = (tiOp) ? getLineFromLoc(tiOp.getLoc()) : 1;
@@ -305,6 +400,7 @@ mlir::LLVM::DITypeAttr DebugTypeGenerator::convertRecordType(
mlir::IntegerType intTy = mlir::IntegerType::get(context, 64);
std::uint64_t offset = 0;
for (auto [fieldName, fieldTy] : Ty.getTypeList()) {
derivedTypeCache.preComponentVisitUpdate();
auto [byteSize, byteAlign] = getFieldSizeAndAlign(fieldTy);
std::optional<llvm::ArrayRef<int64_t>> lowerBounds =
fir::getComponentLowerBoundsIfNonDefault(Ty, fieldName, module,
@@ -317,22 +413,22 @@ mlir::LLVM::DITypeAttr DebugTypeGenerator::convertRecordType(
mlir::LLVM::DITypeAttr elemTy;
if (lowerBounds && seqTy &&
lowerBounds->size() == seqTy.getShape().size()) {
llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
llvm::SmallVector<mlir::LLVM::DINodeAttr> arrayElements;
for (auto [bound, dim] :
llvm::zip_equal(*lowerBounds, seqTy.getShape())) {
auto countAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, dim));
auto lowerAttr = mlir::IntegerAttr::get(intTy, llvm::APInt(64, bound));
auto subrangeTy = mlir::LLVM::DISubrangeAttr::get(
context, countAttr, lowerAttr, /*upperBound=*/nullptr,
/*stride=*/nullptr);
elements.push_back(subrangeTy);
arrayElements.push_back(subrangeTy);
}
elemTy = mlir::LLVM::DICompositeTypeAttr::get(
context, llvm::dwarf::DW_TAG_array_type, /*name=*/nullptr,
/*file=*/nullptr, /*line=*/0, /*scope=*/nullptr,
convertType(seqTy.getEleTy(), fileAttr, scope, declOp),
mlir::LLVM::DIFlags::Zero, /*sizeInBits=*/0, /*alignInBits=*/0,
elements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
arrayElements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
/*allocated=*/nullptr, /*associated=*/nullptr);
} else
elemTy = convertType(fieldTy, fileAttr, scope, /*declOp=*/nullptr);
@@ -344,96 +440,37 @@ mlir::LLVM::DITypeAttr DebugTypeGenerator::convertRecordType(
/*extra data=*/nullptr);
elements.push_back(tyAttr);
offset += llvm::alignTo(byteSize, byteAlign);

// Currently, the handling of recursive debug types in mlir has some
// limitations that were discussed at the end of the thread for the following
// PR.
// https://github.com/llvm/llvm-project/pull/106571
//
// The problem can be explained with the following example code:
// type t2
// type(t1), pointer :: p1
// end type
// type t1
// type(t2), pointer :: p2
// end type
// In the description below, type_self means a temporary type that is
// generated as a placeholder while the members of that type are being
// processed.
//
// If we process t1 first then we will have the following structure after
// it has been processed.
// t1 -> t2 -> t1_self
// This is because when we started processing t2, we did not have the
// complete t1 but its placeholder t1_self.
// Now if some entity requires t2, we will already have that in the cache and
// will return it. But this t2 refers to t1_self and not to t1. In the MLIR
// handling, a type is only allowed to hold a _self reference if it is
// wrapped by the entity that the reference points to. So t1 -> t2 -> t1_self
// is fine because the t1_self reference can be resolved by the outer t1, but
// a standalone t2 is not, because there is no way to resolve it. Until
// this is fixed in mlir, we avoid caching such types. Please see
// DebugTranslation::translateRecursive for details on how mlir handles
// recursive types.
// The code below checks for situations where it would be unsafe to cache
// a type, to avoid this problem. We do that in 2 situations.
// 1. If a member is a record type, then its type would have been processed
// before reaching here. If it is not in the cache, it means that it was
// found to be unsafe to cache. So any type containing it will also not
// be cached.
// 2. The type of the member is found in the cache but it is a placeholder.
// In this case, its recID should match the recID of the type we are
// processing. This helps us to cache the following type.
// type t
// type(t), allocatable :: p
// end type
mlir::Type baseTy = getDerivedType(fieldTy);
if (auto recTy = mlir::dyn_cast<fir::RecordType>(baseTy)) {
auto iter = typeCache.find(recTy);
if (iter == typeCache.end())
canCacheThisType = false;
else {
if (auto tyAttr =
mlir::dyn_cast<mlir::LLVM::DICompositeTypeAttr>(iter->second)) {
if (tyAttr.getIsRecSelf() && tyAttr.getRecId() != recId)
canCacheThisType = false;
}
}
}
derivedTypeCache.postComponentVisitUpdate(nestedRecursions);
}

auto finalAttr = mlir::LLVM::DICompositeTypeAttr::get(
context, recId, /*isRecSelf=*/false, llvm::dwarf::DW_TAG_structure_type,
mlir::StringAttr::get(context, result.second.name), fileAttr, line, scope,
mlir::StringAttr::get(context, sourceName.name), fileAttr, line, scope,
/*baseType=*/nullptr, mlir::LLVM::DIFlags::Zero, offset * 8,
/*alignInBits=*/0, elements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
/*allocated=*/nullptr, /*associated=*/nullptr);

// derivedTypeDepth == 1 means that it is a top level type which is safe to
// cache.
if (canCacheThisType || derivedTypeDepth == 1) {
typeCache[Ty] = finalAttr;
} else {
auto iter = typeCache.find(Ty);
if (iter != typeCache.end())
typeCache.erase(iter);
}
derivedTypeCache.finalize(Ty, finalAttr, std::move(nestedRecursions));

return finalAttr;
}

mlir::LLVM::DITypeAttr DebugTypeGenerator::convertTupleType(
mlir::TupleType Ty, mlir::LLVM::DIFileAttr fileAttr,
mlir::LLVM::DIScopeAttr scope, fir::cg::XDeclareOp declOp) {
// Check if this type has already been converted.
auto iter = typeCache.find(Ty);
if (iter != typeCache.end())
return iter->second;
if (mlir::LLVM::DITypeAttr attr = derivedTypeCache.lookup(Ty))
return attr;

DerivedTypeCache::ActiveLevels nestedRecursions =
derivedTypeCache.startTranslating(Ty);

llvm::SmallVector<mlir::LLVM::DINodeAttr> elements;
mlir::MLIRContext *context = module.getContext();

std::uint64_t offset = 0;
for (auto fieldTy : Ty.getTypes()) {
derivedTypeCache.preComponentVisitUpdate();
auto [byteSize, byteAlign] = getFieldSizeAndAlign(fieldTy);
mlir::LLVM::DITypeAttr elemTy =
convertType(fieldTy, fileAttr, scope, /*declOp=*/nullptr);
@@ -445,6 +482,7 @@ mlir::LLVM::DITypeAttr DebugTypeGenerator::convertTupleType(
/*extra data=*/nullptr);
elements.push_back(tyAttr);
offset += llvm::alignTo(byteSize, byteAlign);
derivedTypeCache.postComponentVisitUpdate(nestedRecursions);
}

auto typeAttr = mlir::LLVM::DICompositeTypeAttr::get(
@@ -453,7 +491,7 @@ mlir::LLVM::DITypeAttr DebugTypeGenerator::convertTupleType(
/*baseType=*/nullptr, mlir::LLVM::DIFlags::Zero, offset * 8,
/*alignInBits=*/0, elements, /*dataLocation=*/nullptr, /*rank=*/nullptr,
/*allocated=*/nullptr, /*associated=*/nullptr);
typeCache[Ty] = typeAttr;
derivedTypeCache.finalize(Ty, typeAttr, std::move(nestedRecursions));
return typeAttr;
}

@@ -667,27 +705,7 @@ DebugTypeGenerator::convertType(mlir::Type Ty, mlir::LLVM::DIFileAttr fileAttr,
return convertCharacterType(charTy, fileAttr, scope, declOp,
/*hasDescriptor=*/false);
} else if (auto recTy = mlir::dyn_cast_if_present<fir::RecordType>(Ty)) {
// For nested derived types like shown below, the call sequence of the
// convertRecordType will look something like as follows:
// convertRecordType (t1)
// convertRecordType (t2)
// convertRecordType (t3)
// We need to recognize when we are processing the top level type like t1
// to make the caching decision. The variable `derivedTypeDepth` is used for
// this purpose and maintains the current depth of derived type processing.
// type t1
// type(t2), pointer :: p1
// end type
// type t2
// type(t3), pointer :: p2
// end type
// type t3
// integer a
// end type
derivedTypeDepth++;
auto result = convertRecordType(recTy, fileAttr, scope, declOp);
derivedTypeDepth--;
return result;
return convertRecordType(recTy, fileAttr, scope, declOp);
} else if (auto tupleTy = mlir::dyn_cast_if_present<mlir::TupleType>(Ty)) {
return convertTupleType(tupleTy, fileAttr, scope, declOp);
} else if (auto refTy = mlir::dyn_cast_if_present<fir::ReferenceType>(Ty)) {