[NFC][TableGen] Delete extra spaces in comments #147004
Merged: jurahul merged 1 commit into llvm:main from jurahul:tablegen_delete_extra_space_in_comments on Jul 4, 2025

No description provided.
Conversation
@llvm/pr-subscribers-tablegen

Author: Rahul Joshi (jurahul)

Changes

Patch is 35.43 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/147004.diff

10 Files Affected:
diff --git a/llvm/include/llvm/TableGen/Record.h b/llvm/include/llvm/TableGen/Record.h
index 5849344bcb0b5..a2b86eb8e7cad 100644
--- a/llvm/include/llvm/TableGen/Record.h
+++ b/llvm/include/llvm/TableGen/Record.h
@@ -868,7 +868,7 @@ class UnOpInit final : public OpInit, public FoldingSetNode {
UnaryOp getOpcode() const { return (UnaryOp)Opc; }
const Init *getOperand() const { return LHS; }
- // Fold - If possible, fold this to a simpler init. Return this if not
+ // Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
const Init *Fold(const Record *CurRec, bool IsFinal = false) const;
@@ -940,7 +940,7 @@ class BinOpInit final : public OpInit, public FoldingSetNode {
std::optional<bool> CompareInit(unsigned Opc, const Init *LHS,
const Init *RHS) const;
- // Fold - If possible, fold this to a simpler init. Return this if not
+ // Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
const Init *Fold(const Record *CurRec) const;
@@ -990,7 +990,7 @@ class TernOpInit final : public OpInit, public FoldingSetNode {
const Init *getMHS() const { return MHS; }
const Init *getRHS() const { return RHS; }
- // Fold - If possible, fold this to a simpler init. Return this if not
+ // Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
const Init *Fold(const Record *CurRec) const;
@@ -1096,7 +1096,7 @@ class FoldOpInit final : public TypedInit, public FoldingSetNode {
void Profile(FoldingSetNodeID &ID) const;
- // Fold - If possible, fold this to a simpler init. Return this if not
+ // Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
const Init *Fold(const Record *CurRec) const;
@@ -1129,7 +1129,7 @@ class IsAOpInit final : public TypedInit, public FoldingSetNode {
void Profile(FoldingSetNodeID &ID) const;
- // Fold - If possible, fold this to a simpler init. Return this if not
+ // Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
const Init *Fold() const;
@@ -1163,7 +1163,7 @@ class ExistsOpInit final : public TypedInit, public FoldingSetNode {
void Profile(FoldingSetNodeID &ID) const;
- // Fold - If possible, fold this to a simpler init. Return this if not
+ // Fold - If possible, fold this to a simpler init. Return this if not
// possible to fold.
const Init *Fold(const Record *CurRec, bool IsFinal = false) const;
@@ -1412,8 +1412,8 @@ class FieldInit final : public TypedInit {
}
};
-/// (v a, b) - Represent a DAG tree value. DAG inits are required
-/// to have at least one value then a (possibly empty) list of arguments. Each
+/// (v a, b) - Represent a DAG tree value. DAG inits are required
+/// to have at least one value then a (possibly empty) list of arguments. Each
/// argument can have a name associated with it.
class DagInit final
: public TypedInit,
diff --git a/llvm/include/llvm/TableGen/SetTheory.h b/llvm/include/llvm/TableGen/SetTheory.h
index 771dcff2f214c..53c78e4572927 100644
--- a/llvm/include/llvm/TableGen/SetTheory.h
+++ b/llvm/include/llvm/TableGen/SetTheory.h
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// This file implements the SetTheory class that computes ordered sets of
-// Records from DAG expressions. Operators for standard set operations are
+// Records from DAG expressions. Operators for standard set operations are
// predefined, and it is possible to add special purpose set operators as well.
//
// The user may define named sets as Records of predefined classes. Set
@@ -112,7 +112,7 @@ class SetTheory {
void addExpander(StringRef ClassName, std::unique_ptr<Expander>);
/// addFieldExpander - Add an expander for ClassName that simply evaluates
- /// FieldName in the Record to get the set elements. That is all that is
+ /// FieldName in the Record to get the set elements. That is all that is
/// needed for a class like:
///
/// class Set<dag d> {
@@ -134,7 +134,7 @@ class SetTheory {
evaluate(*begin++, Elts, Loc);
}
- /// expand - Expand a record into a set of elements if possible. Return a
+ /// expand - Expand a record into a set of elements if possible. Return a
/// pointer to the expanded elements, or NULL if Set cannot be expanded
/// further.
const RecVec *expand(const Record *Set);
diff --git a/llvm/include/llvm/TableGen/StringMatcher.h b/llvm/include/llvm/TableGen/StringMatcher.h
index 49769883a98b4..70554de1a6f17 100644
--- a/llvm/include/llvm/TableGen/StringMatcher.h
+++ b/llvm/include/llvm/TableGen/StringMatcher.h
@@ -26,7 +26,7 @@ class raw_ostream;
/// simple switch tree to classify the input string.
///
/// If a match is found, the code in Matches[i].second is executed; control must
-/// not exit this code fragment. If nothing matches, execution falls through.
+/// not exit this code fragment. If nothing matches, execution falls through.
class StringMatcher {
public:
using StringPair = std::pair<std::string, std::string>;
diff --git a/llvm/lib/TableGen/DetailedRecordsBackend.cpp b/llvm/lib/TableGen/DetailedRecordsBackend.cpp
index cf697599e53a2..1ed64356b7c62 100644
--- a/llvm/lib/TableGen/DetailedRecordsBackend.cpp
+++ b/llvm/lib/TableGen/DetailedRecordsBackend.cpp
@@ -1,4 +1,4 @@
-//===- DetailedRecordBackend.cpp - Detailed Records Report -*- C++ -*-===//
+//===- DetailedRecordBackend.cpp - Detailed Records Report ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
//
//===----------------------------------------------------------------------===//
//
-// This Tablegen backend prints a report that includes all the global
+// This Tablegen backend prints a report that includes all the global
// variables, classes, and records in complete detail. It includes more
// detail than the default TableGen printer backend.
//
diff --git a/llvm/lib/TableGen/Main.cpp b/llvm/lib/TableGen/Main.cpp
index ea716215e0679..f545706d6fe30 100644
--- a/llvm/lib/TableGen/Main.cpp
+++ b/llvm/lib/TableGen/Main.cpp
@@ -8,7 +8,7 @@
//
// TableGen is a tool which can be used to build up a description of something,
// then invoke one or more "tablegen backends" to emit information about the
-// description in some predefined format. In practice, this is used by the LLVM
+// description in some predefined format. In practice, this is used by the LLVM
// code generators to automate generation of a code generator through a
// high-level description of the target.
//
@@ -156,7 +156,7 @@ int llvm::TableGenMain(const char *argv0,
return 1;
// Always write the depfile, even if the main output hasn't changed.
- // If it's missing, Ninja considers the output dirty. If this was below
+ // If it's missing, Ninja considers the output dirty. If this was below
// the early exit below and someone deleted the .inc.d file but not the .inc
// file, tablegen would never write the depfile.
if (!DependFilename.empty()) {
diff --git a/llvm/lib/TableGen/Record.cpp b/llvm/lib/TableGen/Record.cpp
index 7f2ed77a74099..1f3e5dc68f1d6 100644
--- a/llvm/lib/TableGen/Record.cpp
+++ b/llvm/lib/TableGen/Record.cpp
@@ -500,7 +500,7 @@ const Init *BitsInit::convertInitializerTo(const RecTy *Ty) const {
}
if (auto *BRT = dyn_cast<BitsRecTy>(Ty)) {
- // If the number of bits is right, return it. Otherwise we need to expand
+ // If the number of bits is right, return it. Otherwise we need to expand
// or truncate.
if (getNumBits() != BRT->getNumBits()) return nullptr;
return this;
@@ -944,7 +944,7 @@ const Init *UnOpInit::Fold(const Record *CurRec, bool IsFinal) const {
case TAIL:
if (const auto *LHSl = dyn_cast<ListInit>(LHS)) {
assert(!LHSl->empty() && "Empty list in tail");
- // Note the slice(1). We can't just pass the result of getElements()
+ // Note the slice(1). We can't just pass the result of getElements()
// directly.
return ListInit::get(LHSl->getElements().slice(1),
LHSl->getElementType());
@@ -2921,16 +2921,16 @@ void Record::setName(const Init *NewName) {
Name = NewName;
checkName();
// DO NOT resolve record values to the name at this point because
- // there might be default values for arguments of this def. Those
+ // there might be default values for arguments of this def. Those
// arguments might not have been resolved yet so we don't want to
// prematurely assume values for those arguments were not passed to
// this def.
//
// Nonetheless, it may be that some of this Record's values
- // reference the record name. Indeed, the reason for having the
- // record name be an Init is to provide this flexibility. The extra
+ // reference the record name. Indeed, the reason for having the
+ // record name be an Init is to provide this flexibility. The extra
// resolve steps after completely instantiating defs takes care of
- // this. See TGParser::ParseDef and TGParser::ParseDefm.
+ // this. See TGParser::ParseDef and TGParser::ParseDefm.
}
void Record::resolveReferences(Resolver &R, const RecordVal *SkipVal) {
diff --git a/llvm/lib/TableGen/TGLexer.cpp b/llvm/lib/TableGen/TGLexer.cpp
index 46487cba9453e..c8e020d791e09 100644
--- a/llvm/lib/TableGen/TGLexer.cpp
+++ b/llvm/lib/TableGen/TGLexer.cpp
@@ -102,7 +102,7 @@ SMRange TGLexer::getLocRange() const {
}
/// ReturnError - Set the error to the specified string at the specified
-/// location. This is defined to always return tgtok::Error.
+/// location. This is defined to always return tgtok::Error.
tgtok::TokKind TGLexer::ReturnError(SMLoc Loc, const Twine &Msg) {
PrintError(Loc, Msg);
return tgtok::Error;
@@ -116,7 +116,7 @@ bool TGLexer::processEOF() {
SMLoc ParentIncludeLoc = SrcMgr.getParentIncludeLoc(CurBuffer);
if (ParentIncludeLoc != SMLoc()) {
// If prepExitInclude() detects a problem with the preprocessing
- // control stack, it will return false. Pretend that we reached
+ // control stack, it will return false. Pretend that we reached
// the final EOF and stop lexing more tokens by returning false
// to LexToken().
if (!prepExitInclude(false))
@@ -147,7 +147,7 @@ int TGLexer::getNextChar() {
case 0: {
// A NUL character in the stream is either the end of the current buffer or
- // a spurious NUL in the file. Disambiguate that here.
+ // a spurious NUL in the file. Disambiguate that here.
if (CurPtr - 1 == CurBuf.end()) {
--CurPtr; // Arrange for another call to return EOF again.
return EOF;
@@ -160,7 +160,7 @@ int TGLexer::getNextChar() {
case '\n':
case '\r':
// Handle the newline character by ignoring it and incrementing the line
- // count. However, be careful about 'dos style' files with \n\r in them.
+ // count. However, be careful about 'dos style' files with \n\r in them.
// Only treat a \n\r or \r\n as a single line.
if ((*CurPtr == '\n' || (*CurPtr == '\r')) &&
*CurPtr != CurChar)
@@ -259,7 +259,7 @@ tgtok::TokKind TGLexer::LexToken(bool FileOrLineStart) {
int NextChar = 0;
if (isDigit(CurChar)) {
// Allow identifiers to start with a number if it is followed by
- // an identifier. This can happen with paste operations like
+ // an identifier. This can happen with paste operations like
// foo#8i.
int i = 0;
do {
@@ -422,7 +422,7 @@ tgtok::TokKind TGLexer::LexIdentifier() {
return Kind;
}
-/// LexInclude - We just read the "include" token. Get the string token that
+/// LexInclude - We just read the "include" token. Get the string token that
/// comes next and enter the include.
bool TGLexer::LexInclude() {
// The token after the include must be a string.
@@ -461,7 +461,7 @@ void TGLexer::SkipBCPLComment() {
CurPtr = (EOLPos == StringRef::npos) ? CurBuf.end() : CurBuf.data() + EOLPos;
}
-/// SkipCComment - This skips C-style /**/ comments. The only difference from C
+/// SkipCComment - This skips C-style /**/ comments. The only difference from C
/// is that we allow nesting.
bool TGLexer::SkipCComment() {
++CurPtr; // skip the star.
@@ -554,8 +554,8 @@ tgtok::TokKind TGLexer::LexNumber() {
return Base == 2 ? tgtok::BinaryIntVal : tgtok::IntVal;
}
-/// LexBracket - We just read '['. If this is a code block, return it,
-/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
+/// LexBracket - We just read '['. If this is a code block, return it,
+/// otherwise return the bracket. Match: '[' and '[{ ( [^}]+ | }[^]] )* }]'
tgtok::TokKind TGLexer::LexBracket() {
if (CurPtr[0] != '{')
return tgtok::l_square;
@@ -687,7 +687,7 @@ tgtok::TokKind TGLexer::prepIsDirective() const {
NextChar == '\n' ||
// It looks like TableGen does not support '\r' as the actual
// carriage return, e.g. getNextChar() treats a single '\r'
- // as '\n'. So we do the same here.
+ // as '\n'. So we do the same here.
NextChar == '\r')
return Kind;
@@ -700,7 +700,7 @@ tgtok::TokKind TGLexer::prepIsDirective() const {
// #define/**/ AND #define//
//
// These cases will be reported as incorrect after calling
- // prepLexMacroName(). We could have supported C-style comments
+ // prepLexMacroName(). We could have supported C-style comments
// after #ifdef/#define, but this would complicate the code
// for little benefit.
if (NextChar == '/') {
@@ -733,7 +733,7 @@ void TGLexer::prepEatPreprocessorDirective(tgtok::TokKind Kind) {
tgtok::TokKind TGLexer::lexPreprocessor(tgtok::TokKind Kind,
bool ReturnNextLiveToken) {
- // We must be looking at a preprocessing directive. Eat it!
+ // We must be looking at a preprocessing directive. Eat it!
prepEatPreprocessorDirective(Kind);
if (Kind == tgtok::Ifdef || Kind == tgtok::Ifndef) {
@@ -879,7 +879,7 @@ bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
tgtok::TokKind Kind = prepIsDirective();
// If we did not find a preprocessing directive or it is #define,
- // then just skip to the next line. We do not have to do anything
+ // then just skip to the next line. We do not have to do anything
// for #define in the line-skipping mode.
if (Kind == tgtok::Error || Kind == tgtok::Define)
continue;
@@ -909,7 +909,7 @@ bool TGLexer::prepSkipRegion(bool MustNeverBeFalse) {
} while (CurPtr != CurBuf.end());
// We have reached the end of the file, but never left the lines-skipping
- // mode. This means there is no matching #endif.
+ // mode. This means there is no matching #endif.
prepReportPreprocessorStackError();
return false;
}
@@ -939,8 +939,8 @@ bool TGLexer::prepSkipLineBegin() {
// Skip C-style comment.
// Note that we do not care about skipping the C++-style comments.
// If the line contains "//", it may not contain any processable
- // preprocessing directive. Just return CurPtr pointing to
- // the first '/' in this case. We also do not care about
+ // preprocessing directive. Just return CurPtr pointing to
+ // the first '/' in this case. We also do not care about
// incorrect symbols after the first '/' - we are in lines-skipping
// mode, so incorrect code is allowed to some extent.
@@ -968,7 +968,7 @@ bool TGLexer::prepSkipLineBegin() {
++CurPtr;
}
- // We have reached the end of the file. Return to the lines skipping
+ // We have reached the end of the file. Return to the lines skipping
// code, and allow it to handle the EOF as needed.
return true;
}
@@ -994,7 +994,7 @@ bool TGLexer::prepSkipDirectiveEnd() {
SkipBCPLComment();
} else if (NextChar == '*') {
// When we are skipping C-style comment at the end of a preprocessing
- // directive, we can skip several lines. If any meaningful TD token
+ // directive, we can skip several lines. If any meaningful TD token
// follows the end of the C-style comment on the same line, it will
// be considered as an invalid usage of TD token.
// For example, we want to forbid usages like this one:
@@ -1003,7 +1003,7 @@ bool TGLexer::prepSkipDirectiveEnd() {
// #define MACRO /* This macro is used
// to ... */ class Class {}
// One can argue that this should be allowed, but it does not seem
- // to be worth of the complication. Moreover, this matches
+ // to be worth of the complication. Moreover, this matches
// the C preprocessor behavior.
// Set TokStart to the beginning of the comment to enable proper
diff --git a/llvm/lib/TableGen/TGLexer.h b/llvm/lib/TableGen/TGLexer.h
index a5b10b37e9886..ed7d8f3baae59 100644
--- a/llvm/lib/TableGen/TGLexer.h
+++ b/llvm/lib/TableGen/TGLexer.h
@@ -61,7 +61,7 @@ enum TokKind {
// Integer value.
IntVal,
- // Binary constant. Note that these are sized according to the number of
+ // Binary constant. Note that these are sized according to the number of
// bits given.
BinaryIntVal,
@@ -280,7 +280,7 @@ class TGLexer {
//
// An ordered list of preprocessing controls defined by #ifdef/#else
// directives that are in effect currently is called preprocessing
- // control stack. It is represented as a vector of PreprocessorControlDesc's.
+ // control stack. It is represented as a vector of PreprocessorControlDesc's.
//
// The control stack is updated according to the following rules:
//
@@ -321,9 +321,9 @@ class TGLexer {
// EOF
//
// To do this, we clear the preprocessing control stack on entry
- // to each of the included file. PrepIncludeStack is used to store
+ // to each of the included file. PrepIncludeStack is used to store
// preprocessing control stacks for the current file and all its
- // parent files. The back() element is the preprocessing control
+ // parent files. The back() element is the preprocessing control
// stack for the current file.
SmallVector<SmallVector<PreprocessorControlDesc>> PrepIncludeStack;
@@ -332,7 +332,7 @@ class TGLexer {
//
// If IncludeStackMustBeEmpty is true, the include stack must be empty
// after the popping, otherwise, the include stack must not be empty
- // after the popping. Basically, the include stack must be empty
+ // after the popping. Basically, the include stack must be empty
// only if we exit the "top-level" file (i.e. finish lexing).
//
// The method returns false, if the current preprocessing control stack
@@ -340,8 +340,8 @@ class TGLexer {
// true - otherwise.
bool prepExitInclude(bool IncludeStackMustBeEmpty);
- // Look ahead for a preprocessing directive starting from CurPtr. The caller
- // must only call this method, if *(CurPtr - 1) is '#'. If the method matches
+ // Look ahead for a preprocessing directive starting from CurPtr. The caller
+ // must only call this method, if *(CurPtr - 1) is '#'. If the method matches
// a preprocessing directive word followed by a whitespace, then it returns
// one of the internal token kinds, i.e. Ifdef, Else, Endif, Define.
//
@@ -353,26 +353,26 @@ class TGLexer {
//
// We use look-ahead prepIsDirective() and prepEatPreprocessorDirective()
// to avoid adjusting CurPtr before we are sure that '#' is followed
- // by a preprocessing directive. If it is not, then we fall back to
+ // by a preprocessing directive. If it is not, then we fall back to
// tgtok::paste interpretation of '#'.
void prepEatPreprocessorDirective(tgtok::TokKind Kind);
// The main "exit" point from the token parsing to preprocessor.
//
// The method is called for CurPtr, when prepIsDirective() returns
- // true. The first parameter matches the result of prepIsDirective(),
+ // true. The first parameter matches the result of prepIsDirective(),
// denoting the actual preprocessor directive to be processed.
//
// If the preprocessing directive disables the tokens processing, e.g.:
// #ifdef NAME // NA...
[truncated]
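The edits above are purely mechanical: every hunk either collapses a double space after a sentence-ending period in a comment or realigns a file-header banner, without touching any code. As a rough illustration only, here is a minimal sketch of how such a cleanup could be scripted. This is an assumption for illustration; the PR does not say how the edits were produced, and the program below (including its "//"-only heuristic) is hypothetical rather than the author's actual tooling.

// Hypothetical sketch: collapse ".  " (period plus two or more spaces) to
// ". " in the comment portion of each line of a single file, writing the
// result to stdout. Not the tooling used for this PR.
#include <fstream>
#include <iostream>
#include <regex>
#include <string>

int main(int argc, char **argv) {
  if (argc != 2) {
    std::cerr << "usage: " << argv[0] << " <file>\n";
    return 1;
  }
  std::ifstream In(argv[1]);
  std::string Line;
  // A period followed by two or more spaces collapses to a period and a
  // single space.
  const std::regex DoubleSpace(R"(\.  +)");
  while (std::getline(In, Line)) {
    // Only rewrite the text after "//" so string literals and other code
    // are left untouched.
    std::string::size_type Pos = Line.find("//");
    if (Pos != std::string::npos)
      Line = Line.substr(0, Pos) +
             std::regex_replace(Line.substr(Pos), DoubleSpace, ". ");
    std::cout << Line << '\n';
  }
  return 0;
}

Restricting the rewrite to the text after "//" is a deliberately conservative choice: it avoids changing string literals, while block comments and banner lines would still need a separate manual pass.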