#include "llvm/Support/Regex.h"
    llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
      Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
      Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
  Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts));
  Lex->SetKeepWhitespaceMode(true);

    auto Identifier = &IdentTable.get(ForEachMacro);
  for (const std::string &IfMacro : Style.IfMacros) {
    auto Identifier = &IdentTable.get(AttributeMacro);
    auto Identifier = &IdentTable.get(StatementMacro);
    auto Identifier = &IdentTable.get(TypenameMacro);
    auto Identifier = &IdentTable.get(NamespaceMacro);
  for (const std::string &WhitespaceSensitiveMacro :
    auto Identifier = &IdentTable.get(WhitespaceSensitiveMacro);
  for (const std::string &StatementAttributeLikeMacro :
    auto Identifier = &IdentTable.get(StatementAttributeLikeMacro);
    MacrosSkippedByRemoveParentheses.insert(&IdentTable.get(Macro));
    TemplateNames.insert(&IdentTable.get(TemplateName));
  for (const auto &TypeName : Style.TypeNames)
    TypeNames.insert(&IdentTable.get(TypeName));
    VariableTemplates.insert(&IdentTable.get(VariableTemplate));
  assert(Tokens.empty());
  assert(FirstInLineIndex == 0);
  enum { FO_None, FO_CurrentLine, FO_NextLine } FormatOff = FO_None;
    Tokens.push_back(getNextToken());
    auto &Tok = *Tokens.back();
    const auto NewlinesBefore = Tok.NewlinesBefore;
      if (NewlinesBefore == 0)
      if (NewlinesBefore > 1) {
        Tok.Finalized = true;
        FormatOff = FO_CurrentLine;
    if (!FormattingDisabled && FormatOffRegex.match(Tok.TokenText)) {
      if (Tok.is(tok::comment) &&
          (NewlinesBefore > 0 || Tokens.size() == 1)) {
        Tok.Finalized = true;
        FormatOff = FO_NextLine;
        for (auto *Token : reverse(Tokens)) {
          Token->Finalized = true;
          if (Token->NewlinesBefore > 0)
        FormatOff = FO_CurrentLine;
      tryParseJSRegexLiteral();
      handleTemplateStrings();
      tryParsePythonComment();
    tryMergePreviousTokens();
      handleCSharpVerbatimAndInterpolatedStrings();
      handleTableGenMultilineString();
      handleTableGenNumericLikeIdentifier();
    if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
      FirstInLineIndex = Tokens.size() - 1;
  } while (Tokens.back()->isNot(tok::eof));

  auto &TokEOF = *Tokens.back();
  if (TokEOF.NewlinesBefore == 0) {
    TokEOF.NewlinesBefore = 1;
    TokEOF.OriginalColumn = 0;
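// Dispatches to the language-specific merge helpers below. Each helper looks
// at the most recently lexed tokens (the tail of Tokens) and, if they form a
// multi-character operator or construct that the raw lexer split apart
// (e.g. "??" in C#, "===" in JavaScript, "!cond" in TableGen), collapses them
// into a single FormatToken.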
void FormatTokenLexer::tryMergePreviousTokens() {
  if (tryMerge_TMacro())
  if (tryMergeConflictMarkers())
  if (tryMergeLessLess())
  if (tryMergeGreaterGreater())
  if (tryMergeForEach())
  if (Style.isCpp() && tryTransformTryUsageForC())
      tryMergeUserDefinedLiteral()) {

    static const tok::TokenKind NullishCoalescingOperator[] = {tok::question,
    static const tok::TokenKind NullPropagatingOperator[] = {tok::question,
    static const tok::TokenKind FatArrow[] = {tok::equal, tok::greater};
    if (tryMergeTokens(FatArrow, TT_FatArrow))
    if (tryMergeTokens(NullishCoalescingOperator, TT_NullCoalescingOperator)) {
      Tokens.back()->Tok.setKind(tok::pipepipe);
    if (tryMergeTokens(NullPropagatingOperator, TT_NullPropagatingOperator)) {
      Tokens.back()->Tok.setKind(tok::period);
    if (tryMergeNullishCoalescingEqual())
        tok::question, tok::l_square};
    if (tryMergeCSharpKeywordVariables())
    if (tryMergeCSharpStringLiteral())
    if (tryTransformCSharpForEach())
    if (tryMergeTokens(CSharpNullConditionalLSquare,
                       TT_CSharpNullConditionalLSquare)) {
      Tokens.back()->Tok.setKind(tok::l_square);
    if (tryMergeNSStringLiteral())

    static const tok::TokenKind JSIdentity[] = {tok::equalequal, tok::equal};
    static const tok::TokenKind JSShiftEqual[] = {tok::greater, tok::greater,
    static const tok::TokenKind JSExponentiation[] = {tok::star, tok::star};
    static const tok::TokenKind JSPipePipeEqual[] = {tok::pipepipe, tok::equal};
    static const tok::TokenKind JSAndAndEqual[] = {tok::ampamp, tok::equal};
    if (tryMergeTokens(JSIdentity, TT_BinaryOperator))
    if (tryMergeTokens(JSNotIdentity, TT_BinaryOperator))
    if (tryMergeTokens(JSShiftEqual, TT_BinaryOperator))
    if (tryMergeTokens(JSExponentiation, TT_JsExponentiation))
    if (tryMergeTokens(JSExponentiationEqual, TT_JsExponentiationEqual)) {
      Tokens.back()->Tok.setKind(tok::starequal);
    if (tryMergeTokens(JSAndAndEqual, TT_JsAndAndEqual) ||
        tryMergeTokens(JSPipePipeEqual, TT_JsPipePipeEqual)) {
      Tokens.back()->Tok.setKind(tok::equal);
    if (tryMergeJSPrivateIdentifier())
  } else if (Style.isJava()) {
        tok::greater, tok::greater, tok::greaterequal};
    if (tryMergeTokens(JavaRightLogicalShiftAssign, TT_BinaryOperator))

    if (Tokens.size() >= 3 && Tokens.end()[-3]->is(TT_VerilogNumberBase) &&
        Tokens.end()[-2]->is(tok::numeric_constant) &&
        Tokens.back()->isOneOf(tok::numeric_constant, tok::identifier,
        tryMergeTokens(2, TT_Unknown)) {
    if (tryMergeTokensAny({{tok::minus, tok::colon}, {tok::plus, tok::colon}},
    if (Tokens.back()->TokenText.size() == 1 &&
        tryMergeTokensAny({{tok::caret, tok::tilde}, {tok::tilde, tok::caret}},
                          TT_BinaryOperator)) {
      Tokens.back()->Tok.setKind(tok::caret);
    if (tryMergeTokens({tok::less, tok::less}, TT_BinaryOperator)) {
      Tokens.back()->Tok.setKind(tok::lessless);
    if (tryMergeTokens({tok::greater, tok::greater}, TT_BinaryOperator)) {
      Tokens.back()->Tok.setKind(tok::greatergreater);
    if (tryMergeTokensAny({{tok::lessless, tok::equal},
                           {tok::lessless, tok::lessequal},
                           {tok::greatergreater, tok::equal},
                           {tok::greatergreater, tok::greaterequal},
                           {tok::colon, tok::equal},
                           {tok::colon, tok::slash}},
                          TT_BinaryOperator)) {
    if (tryMergeTokensAny({{tok::star, tok::star},
                           {tok::lessless, tok::less},
                           {tok::greatergreater, tok::greater},
                           {tok::exclaimequal, tok::equal},
                           {tok::exclaimequal, tok::question},
                           {tok::equalequal, tok::equal},
                           {tok::equalequal, tok::question}},
                          TT_BinaryOperator)) {
    if (tryMergeTokensAny({{tok::plusequal, tok::greater},
                           {tok::plus, tok::star, tok::greater},
                           {tok::minusequal, tok::greater},
                           {tok::minus, tok::star, tok::greater},
                           {tok::less, tok::arrow},
                           {tok::equal, tok::greater},
                           {tok::star, tok::greater},
                           {tok::pipeequal, tok::greater},
                           {tok::pipe, tok::arrow},
                           {tok::hash, tok::minus, tok::hash},
                           {tok::hash, tok::equal, tok::hash}},
                          TT_BinaryOperator) ||
        Tokens.back()->is(tok::arrow)) {
    if (tryMergeTokens({tok::l_square, tok::l_brace},
                       TT_TableGenMultiLineString)) {
      Tokens.back()->setFinalizedType(TT_TableGenMultiLineString);
      Tokens.back()->Tok.setKind(tok::string_literal);
    if (tryMergeTokens({tok::exclaim, tok::identifier},
                       TT_TableGenBangOperator)) {
      Tokens.back()->Tok.setKind(tok::identifier);
      Tokens.back()->Tok.setIdentifierInfo(nullptr);
      if (Tokens.back()->TokenText == "!cond")
        Tokens.back()->setFinalizedType(TT_TableGenCondOperator);
        Tokens.back()->setFinalizedType(TT_TableGenBangOperator);
    if (tryMergeTokens({tok::exclaim, tok::kw_if}, TT_TableGenBangOperator)) {
      Tokens.back()->Tok.setKind(tok::identifier);
      Tokens.back()->Tok.setIdentifierInfo(nullptr);
      Tokens.back()->setFinalizedType(TT_TableGenBangOperator);
    if (tryMergeTokens({tok::plus, tok::numeric_constant}, TT_Unknown)) {
      Tokens.back()->Tok.setKind(tok::numeric_constant);
    if (tryMergeTokens({tok::minus, tok::numeric_constant}, TT_Unknown)) {
      Tokens.back()->Tok.setKind(tok::numeric_constant);
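// Merges an "@" token followed by a string literal into a single token typed
// TT_ObjCStringLiteral, so that @"foo" is treated as one token.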
bool FormatTokenLexer::tryMergeNSStringLiteral() {
  if (Tokens.size() < 2)
  auto &At = *(Tokens.end() - 2);
  auto &String = *(Tokens.end() - 1);
  if (At->isNot(tok::at) || String->isNot(tok::string_literal))
  At->Tok.setKind(tok::string_literal);
  At->TokenText = StringRef(At->TokenText.begin(),
                            String->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += String->ColumnWidth;
  At->setType(TT_ObjCStringLiteral);
  Tokens.erase(Tokens.end() - 1);
bool FormatTokenLexer::tryMergeJSPrivateIdentifier() {
  if (Tokens.size() < 2)
  auto &Hash = *(Tokens.end() - 2);
  if (Hash->isNot(tok::hash) || Identifier->isNot(tok::identifier))
  Hash->Tok.setKind(tok::identifier);
      StringRef(Hash->TokenText.begin(),
                Identifier->TokenText.end() - Hash->TokenText.begin());
  Hash->setType(TT_JsPrivateIdentifier);
  Tokens.erase(Tokens.end() - 1);
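// Merges the prefix of a C# verbatim or interpolated string ("@", "$", or a
// combination of both) with the string literal that follows it, so that
// @"..." and $"..." become a single TT_CSharpStringLiteral token.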
bool FormatTokenLexer::tryMergeCSharpStringLiteral() {
  if (Tokens.size() < 2)
  const auto String = *(Tokens.end() - 1);
  if (String->isNot(tok::string_literal))
  auto Prefix = *(Tokens.end() - 2);
  if (Prefix->isNot(tok::at) && Prefix->TokenText != "$")
  if (Tokens.size() > 2) {
    const auto Tok = *(Tokens.end() - 3);
    if ((Tok->TokenText == "$" && Prefix->is(tok::at)) ||
        (Tok->is(tok::at) && Prefix->TokenText == "$")) {
      Tok->ColumnWidth += Prefix->ColumnWidth;
      Tokens.erase(Tokens.end() - 2);
  Prefix->Tok.setKind(tok::string_literal);
      StringRef(Prefix->TokenText.begin(),
                String->TokenText.end() - Prefix->TokenText.begin());
  Prefix->ColumnWidth += String->ColumnWidth;
  Prefix->setType(TT_CSharpStringLiteral);
  Tokens.erase(Tokens.end() - 1);

const llvm::StringSet<> FormatTokenLexer::CSharpAttributeTargets = {
    "assembly", "module",   "field",  "event", "method",
    "param",    "property", "return", "type",
bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
  if (Tokens.size() < 2)
  auto &NullishCoalescing = *(Tokens.end() - 2);
  auto &Equal = *(Tokens.end() - 1);
  if (NullishCoalescing->isNot(TT_NullCoalescingOperator) ||
      Equal->isNot(tok::equal)) {
  NullishCoalescing->Tok.setKind(tok::equal);
  NullishCoalescing->TokenText =
      StringRef(NullishCoalescing->TokenText.begin(),
                Equal->TokenText.end() - NullishCoalescing->TokenText.begin());
  NullishCoalescing->ColumnWidth += Equal->ColumnWidth;
  NullishCoalescing->setType(TT_NullCoalescingEqual);
  Tokens.erase(Tokens.end() - 1);
bool FormatTokenLexer::tryMergeCSharpKeywordVariables() {
  if (Tokens.size() < 2)
  const auto At = *(Tokens.end() - 2);
  if (At->isNot(tok::at))
  const auto Keyword = *(Tokens.end() - 1);
  At->Tok.setKind(tok::identifier);
  At->TokenText = StringRef(At->TokenText.begin(),
                            Keyword->TokenText.end() - At->TokenText.begin());
  At->ColumnWidth += Keyword->ColumnWidth;
  At->setType(Keyword->getType());
  Tokens.erase(Tokens.end() - 1);

bool FormatTokenLexer::tryTransformCSharpForEach() {
bool FormatTokenLexer::tryMergeForEach() {
  if (Tokens.size() < 2)
  auto &For = *(Tokens.end() - 2);
  auto &Each = *(Tokens.end() - 1);
  if (For->isNot(tok::kw_for))
  if (Each->isNot(tok::identifier))
  if (Each->TokenText != "each")
  For->setType(TT_ForEachMacro);
  For->Tok.setKind(tok::kw_for);
  For->TokenText = StringRef(For->TokenText.begin(),
                             Each->TokenText.end() - For->TokenText.begin());
  For->ColumnWidth += Each->ColumnWidth;
  Tokens.erase(Tokens.end() - 1);
bool FormatTokenLexer::tryTransformTryUsageForC() {
  if (Tokens.size() < 2)
  auto &Try = *(Tokens.end() - 2);
  if (Try->isNot(tok::kw_try))
  auto &Next = *(Tokens.end() - 1);
  if (Next->isOneOf(tok::l_brace, tok::colon, tok::hash, tok::comment))
  if (Tokens.size() > 2) {
    auto &At = *(Tokens.end() - 3);
  Try->Tok.setKind(tok::identifier);
bool FormatTokenLexer::tryMergeLessLess() {
  if (Tokens.size() < 3)
  auto First = Tokens.end() - 3;
  if (First[0]->isNot(tok::less) || First[1]->isNot(tok::less))
  if (First[1]->hasWhitespaceBefore())
  auto X = Tokens.size() > 3 ? First[-1] : nullptr;
  if (X && X->is(tok::less))
  if ((!X || X->isNot(tok::kw_operator)) && Y->is(tok::less))
  First[0]->Tok.setKind(tok::lessless);
  First[0]->TokenText = "<<";
  First[0]->ColumnWidth += 1;
  Tokens.erase(Tokens.end() - 2);
bool FormatTokenLexer::tryMergeGreaterGreater() {
  if (Tokens.size() < 2)
  auto First = Tokens.end() - 2;
  if (First[0]->isNot(tok::greater) || First[1]->isNot(tok::greater))
  if (First[1]->hasWhitespaceBefore())
  auto Tok = Tokens.size() > 2 ? First[-1] : nullptr;
  if (Tok && Tok->isNot(tok::kw_operator))
  First[0]->Tok.setKind(tok::greatergreater);
  First[0]->TokenText = ">>";
  First[0]->ColumnWidth += 1;
  Tokens.erase(Tokens.end() - 1);
bool FormatTokenLexer::tryMergeUserDefinedLiteral() {
  if (Tokens.size() < 2)
  auto *First = Tokens.end() - 2;
  auto &Suffix = First[1];
  if (Suffix->hasWhitespaceBefore() || Suffix->TokenText != "$")
  if (!Text.ends_with("_"))
  Tokens.erase(&Suffix);
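// Generic merge helpers: the first overload checks that the last Kinds.size()
// tokens match the given kinds exactly, the second collapses the last Count
// tokens into the first one (extending its text and column width), and
// tryMergeTokensAny tries a list of alternative kind sequences in order.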
bool FormatTokenLexer::tryMergeTokens(ArrayRef<tok::TokenKind> Kinds,
  if (Tokens.size() < Kinds.size())
  const auto *First = Tokens.end() - Kinds.size();
  for (unsigned i = 0; i < Kinds.size(); ++i)
    if (First[i]->isNot(Kinds[i]))
  return tryMergeTokens(Kinds.size(), NewType);
bool FormatTokenLexer::tryMergeTokens(size_t Count, TokenType NewType) {
  if (Tokens.size() < Count)
  const auto *First = Tokens.end() - Count;
  unsigned AddLength = 0;
  for (size_t i = 1; i < Count; ++i) {
    if (First[i]->hasWhitespaceBefore())
    AddLength += First[i]->TokenText.size();
  Tokens.resize(Tokens.size() - Count + 1);
  First[0]->TokenText = StringRef(First[0]->TokenText.data(),
                                  First[0]->TokenText.size() + AddLength);
  First[0]->ColumnWidth += AddLength;
  First[0]->setType(NewType);
bool FormatTokenLexer::tryMergeTokensAny(
    ArrayRef<ArrayRef<tok::TokenKind>> Kinds, TokenType NewType) {
  return llvm::any_of(Kinds, [this, NewType](ArrayRef<tok::TokenKind> Kinds) {
    return tryMergeTokens(Kinds, NewType);
bool FormatTokenLexer::precedesOperand(FormatToken *Tok) {
  return Tok->isOneOf(tok::period, tok::l_paren, tok::comma, tok::l_brace,
                      tok::r_brace, tok::l_square, tok::semi, tok::exclaim,
                      tok::colon, tok::question, tok::tilde) ||
         Tok->isOneOf(tok::kw_return, tok::kw_do, tok::kw_case, tok::kw_throw,
                      tok::kw_else, tok::kw_void, tok::kw_typeof,
         Tok->isPlacementOperator() || Tok->isBinaryOperator();

bool FormatTokenLexer::canPrecedeRegexLiteral(FormatToken *Prev) {
  if (Prev->isOneOf(tok::plusplus, tok::minusminus, tok::exclaim))
    return Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]);
  if (!precedesOperand(Prev))
void FormatTokenLexer::tryParseJavaTextBlock() {
  const auto *S = Lex->getBufferLocation();
  const auto *End = Lex->getBuffer().end();
  if (S == End || *S != '\"')
  for (int Count = 0; Count < 3 && S < End; ++S) {
  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(S)));
void FormatTokenLexer::tryParseJSRegexLiteral() {
  FormatToken *RegexToken = Tokens.back();
  if (!RegexToken->isOneOf(tok::slash, tok::slashequal))
  FormatToken *Prev = nullptr;
  for (FormatToken *FT : llvm::drop_begin(llvm::reverse(Tokens))) {
    if (FT->isNot(tok::comment)) {
  if (!canPrecedeRegexLiteral(Prev))
  const char *Offset = Lex->getBufferLocation();
  const char *RegexBegin = Offset - RegexToken->TokenText.size();
  StringRef Buffer = Lex->getBuffer();
  bool InCharacterClass = false;
  bool HaveClosingSlash = false;
  for (; !HaveClosingSlash && Offset != Buffer.end(); ++Offset) {
      InCharacterClass = true;
      InCharacterClass = false;
      if (!InCharacterClass)
        HaveClosingSlash = true;
  RegexToken->setType(TT_RegexLiteral);
  RegexToken->Tok.setKind(tok::string_literal);
  RegexToken->TokenText = StringRef(RegexBegin, Offset - RegexBegin);
  RegexToken->ColumnWidth = RegexToken->TokenText.size();
  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
  auto Repeated = [&Begin, End]() {
  for (int UnmatchedOpeningBraceCount = 0; Begin < End; ++Begin) {
        ++UnmatchedOpeningBraceCount;
      else if (UnmatchedOpeningBraceCount > 0)
        --UnmatchedOpeningBraceCount;
    if (UnmatchedOpeningBraceCount > 0)
  if (Verbatim && Repeated()) {
void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
  FormatToken *CSharpStringLiteral = Tokens.back();
  if (CSharpStringLiteral->isNot(TT_CSharpStringLiteral))
  auto &TokenText = CSharpStringLiteral->TokenText;
  bool Verbatim = false;
  bool Interpolated = false;
  if (TokenText.starts_with(R"($@")") || TokenText.starts_with(R"(@$")")) {
  } else if (TokenText.starts_with(R"(@")")) {
  } else if (TokenText.starts_with(R"($")")) {
  if (!Verbatim && !Interpolated)
  const char *StrBegin = Lex->getBufferLocation() - TokenText.size();
  const char *Offset = StrBegin;
  Offset += Verbatim && Interpolated ? 3 : 2;
  const auto End = Lex->getBuffer().end();
  StringRef LiteralText(StrBegin, Offset - StrBegin + 1);
  TokenText = LiteralText;
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                : LiteralText.substr(0, FirstBreak);
      FirstLineText, CSharpStringLiteral->OriginalColumn, Style.TabWidth,
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    CSharpStringLiteral->IsMultiline = true;
    unsigned StartColumn = 0;
    CSharpStringLiteral->LastLineColumnWidth =
        StartColumn, Style.TabWidth, Encoding);
  assert(Offset < End);
  resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset + 1)));
void FormatTokenLexer::handleTableGenMultilineString() {
  FormatToken *MultiLineString = Tokens.back();
  if (MultiLineString->isNot(TT_TableGenMultiLineString))
  auto OpenOffset = Lex->getCurrentBufferOffset() - 2;
  auto CloseOffset = Lex->getBuffer().find("}]", OpenOffset);
  if (CloseOffset == StringRef::npos)
  auto Text = Lex->getBuffer().substr(OpenOffset, CloseOffset - OpenOffset + 2);
  MultiLineString->TokenText = Text;
      Lex->getSourceLocation(Lex->getBufferLocation() - 2 + Text.size())));
  auto FirstLineText = Text;
  auto FirstBreak = Text.find('\n');
  if (FirstBreak != StringRef::npos) {
    MultiLineString->IsMultiline = true;
    FirstLineText = Text.substr(0, FirstBreak + 1);
    auto LastBreak = Text.rfind('\n');
        Text.substr(LastBreak + 1), MultiLineString->OriginalColumn,
      FirstLineText, MultiLineString->OriginalColumn, Style.TabWidth, Encoding);
void FormatTokenLexer::handleTableGenNumericLikeIdentifier() {
  FormatToken *Tok = Tokens.back();
  if (Tok->isNot(tok::numeric_constant))
  StringRef Text = Tok->TokenText;
  const auto NonDigitPos = Text.find_if([](char C) { return !isdigit(C); });
  if (NonDigitPos == StringRef::npos)
  char FirstNonDigit = Text[NonDigitPos];
  if (NonDigitPos < Text.size() - 1) {
    char TheNext = Text[NonDigitPos + 1];
    if (FirstNonDigit == 'b' && (TheNext == '0' || TheNext == '1'))
    if (FirstNonDigit == 'x' && isxdigit(TheNext))
  if (isalpha(FirstNonDigit) || FirstNonDigit == '_') {
    Tok->Tok.setKind(tok::identifier);
    Tok->Tok.setIdentifierInfo(nullptr);
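// Handles JavaScript template strings (`...` with ${...} substitutions).
// StateStack tracks whether the lexer is currently inside a template string
// or inside a ${...} expression, and the backtick token is grown to cover the
// whole literal, updating the multi-line column information as above.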
void FormatTokenLexer::handleTemplateStrings() {
  FormatToken *BacktickToken = Tokens.back();
  if (BacktickToken->is(tok::l_brace)) {
  if (BacktickToken->is(tok::r_brace)) {
    if (StateStack.size() == 1)
  } else if (BacktickToken->is(tok::unknown) &&
             BacktickToken->TokenText == "`") {
  const char *Offset = Lex->getBufferLocation();
  const char *TmplBegin = Offset - BacktickToken->TokenText.size();
  for (; Offset != Lex->getBuffer().end(); ++Offset) {
    if (Offset[0] == '`') {
    if (Offset[0] == '\\') {
    } else if (Offset + 1 < Lex->getBuffer().end() && Offset[0] == '$' &&
  StringRef LiteralText(TmplBegin, Offset - TmplBegin);
  BacktickToken->setType(TT_TemplateString);
  BacktickToken->Tok.setKind(tok::string_literal);
  BacktickToken->TokenText = LiteralText;
  size_t FirstBreak = LiteralText.find('\n');
  StringRef FirstLineText = FirstBreak == StringRef::npos
                                : LiteralText.substr(0, FirstBreak);
      FirstLineText, BacktickToken->OriginalColumn, Style.TabWidth, Encoding);
  size_t LastBreak = LiteralText.rfind('\n');
  if (LastBreak != StringRef::npos) {
    BacktickToken->IsMultiline = true;
    unsigned StartColumn = 0;
    BacktickToken->LastLineColumnWidth =
        StartColumn, Style.TabWidth, Encoding);
  SourceLocation loc = Lex->getSourceLocation(Offset);
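// In languages that use "#" line comments, a "#" or "##" token is re-typed as
// a TT_LineComment that extends to the end of the current line.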
void FormatTokenLexer::tryParsePythonComment() {
  FormatToken *HashToken = Tokens.back();
  if (!HashToken->isOneOf(tok::hash, tok::hashhash))
  const char *CommentBegin =
      Lex->getBufferLocation() - HashToken->TokenText.size();
  size_t From = CommentBegin - Lex->getBuffer().begin();
  size_t To = Lex->getBuffer().find_first_of('\n', From);
  if (To == StringRef::npos)
    To = Lex->getBuffer().size();
  size_t Len = To - From;
  HashToken->setType(TT_LineComment);
  HashToken->Tok.setKind(tok::comment);
  HashToken->TokenText = Lex->getBuffer().substr(From, Len);
  SourceLocation Loc = To < Lex->getBuffer().size()
                           ? Lex->getSourceLocation(CommentBegin + Len)
bool FormatTokenLexer::tryMerge_TMacro() {
  if (Tokens.size() < 4)
  FormatToken *Last = Tokens.back();
  if (Last->isNot(tok::r_paren))
  FormatToken *String = Tokens[Tokens.size() - 2];
  if (String->isNot(tok::string_literal) || String->IsMultiline)
  if (Tokens[Tokens.size() - 3]->isNot(tok::l_paren))
  FormatToken *Macro = Tokens[Tokens.size() - 4];
  if (Macro->TokenText != "_T")
  const char *Start = Macro->TokenText.data();
  const char *End = Last->TokenText.data() + Last->TokenText.size();
  String->TokenText = StringRef(Start, End - Start);
  String->IsFirst = Macro->IsFirst;
  String->LastNewlineOffset = Macro->LastNewlineOffset;
  String->WhitespaceRange = Macro->WhitespaceRange;
  String->OriginalColumn = Macro->OriginalColumn;
      String->TokenText, String->OriginalColumn, Style.TabWidth, Encoding);
  String->NewlinesBefore = Macro->NewlinesBefore;
  String->HasUnescapedNewline = Macro->HasUnescapedNewline;
  Tokens.back() = String;
  if (FirstInLineIndex >= Tokens.size())
    FirstInLineIndex = Tokens.size() - 1;
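// Recognizes version-control conflict markers at the start of a line
// ("<<<<<<<", "|||||||", "=======", ">>>>>>>", and the shorter four-character
// variants) and collapses the whole marker line into a single token typed
// TT_ConflictStart / TT_ConflictAlternative / TT_ConflictEnd, so later stages
// can treat the marker line as one unit.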
bool FormatTokenLexer::tryMergeConflictMarkers() {
  if (Tokens.back()->NewlinesBefore == 0 && Tokens.back()->isNot(tok::eof))
  unsigned FirstInLineOffset;
      Tokens[FirstInLineIndex]->getStartOfNonWhitespace());
  auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
  if (LineOffset == StringRef::npos)
  auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
  StringRef LineStart;
  if (FirstSpace == StringRef::npos)
    LineStart = Buffer.substr(LineOffset);
    LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
  if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
    Type = TT_ConflictStart;
  } else if (LineStart == "|||||||" || LineStart == "=======" ||
             LineStart == "====") {
    Type = TT_ConflictAlternative;
  } else if (LineStart == ">>>>>>>" || LineStart == "<<<<") {
    Type = TT_ConflictEnd;
  if (Type != TT_Unknown) {
    FormatToken *Next = Tokens.back();
    Tokens.resize(FirstInLineIndex + 1);
    Tokens.back()->setType(Type);
    Tokens.back()->Tok.setKind(tok::kw___unknown_anytype);
    Tokens.push_back(Next);
FormatToken *FormatTokenLexer::getStashedToken() {
  Token Tok = FormatTok->Tok;
  StringRef TokenText = FormatTok->TokenText;
  FormatTok = new (Allocator.Allocate()) FormatToken;
  FormatTok->Tok = Tok;
  SourceLocation TokLocation =
void FormatTokenLexer::truncateToken(size_t NewLen) {
  assert(NewLen <= FormatTok->TokenText.size());
      Lex->getBufferLocation() - FormatTok->TokenText.size() + NewLen)));

  const unsigned char *const Begin = Text.bytes_begin();
  const unsigned char *const End = Text.bytes_end();
  const unsigned char *Cur = Begin;
    } else if (Cur[0] == '\\') {
      const auto *Lookahead = Cur + 1;
      Cur = Lookahead + 1;
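// Core tokenizing loop: reads the next raw token, folds any leading
// whitespace (including escaped newlines) into WhitespaceLength, applies
// language-specific fixups (Verilog number bases, Java text blocks, macro
// identifiers configured in the style), and computes the token's column
// width before handing it on.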
FormatToken *FormatTokenLexer::getNextToken() {
    return getStashedToken();
  FormatTok = new (Allocator.Allocate()) FormatToken;
  readRawToken(*FormatTok);
  SourceLocation WhitespaceStart =
  FormatTok->IsFirst = IsFirstToken;
  IsFirstToken = false;
  unsigned WhitespaceLength = TrailingWhitespace;
  while (FormatTok->isNot(tok::eof)) {
    if (LeadingWhitespace == 0)
    if (LeadingWhitespace < FormatTok->TokenText.size())
      truncateToken(LeadingWhitespace);
    bool InEscape = false;
    for (int i = 0, e = Text.size(); i != e; ++i) {
        if (i + 1 < e && Text[i + 1] == '\n')
          i > 0 && Text[i - 1] == '\n' &&
          ((i + 1 < e && Text[i + 1] == '\n') ||
           (i + 2 < e && Text[i + 1] == '\r' && Text[i + 2] == '\n'))) {
    assert([&]() -> bool {
      return j < Text.size() && (Text[j] == '\n' || Text[j] == '\r');
    WhitespaceLength += Text.size();
    readRawToken(*FormatTok);
  if (FormatTok->is(tok::unknown))
    FormatTok->setType(TT_ImplicitStringLiteral);

  const bool IsCpp = Style.isCpp();
      Text.starts_with("//") &&
    assert(FormatTok->is(tok::comment));
    for (auto Pos = Text.find('\\'); Pos++ != StringRef::npos;
         Pos = Text.find('\\', Pos)) {
      if (Pos < Text.size() && Text[Pos] == '\n' &&
          (!IsCpp || Text.substr(Pos + 1).ltrim().starts_with("//"))) {
    static const llvm::Regex NumberBase("^s?[bdho]", llvm::Regex::IgnoreCase);
    SmallVector<StringRef, 1> Matches;
    if (FormatTok->is(tok::numeric_constant)) {
      auto Quote = FormatTok->TokenText.find('\'');
      if (Quote != StringRef::npos)
        truncateToken(Quote);
    } else if (FormatTok->isOneOf(tok::hash, tok::hashhash)) {
    } else if (FormatTok->is(tok::raw_identifier)) {
    } else if (FormatTok->TokenText == "``") {
    } else if (!Tokens.empty() && Tokens.back()->is(Keywords.kw_apostrophe) &&
               NumberBase.match(FormatTok->TokenText, &Matches)) {
      truncateToken(Matches[0].size());
      WhitespaceStart, WhitespaceStart.getLocWithOffset(WhitespaceLength));
  TrailingWhitespace = 0;
  if (FormatTok->is(tok::comment)) {
    StringRef UntrimmedText = FormatTok->TokenText;
    TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
  } else if (FormatTok->is(tok::raw_identifier)) {
    IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
        FormatTok->isOneOf(tok::kw_struct, tok::kw_union, tok::kw_delete,
                           tok::kw_operator)) {
        FormatTok->isOneOf(tok::kw_struct, tok::kw_union,
                           tok::kw_operator)) {
  } else if (const bool Greater = FormatTok->is(tok::greatergreater);
             Greater || FormatTok->is(tok::lessless)) {
  } else if (Style.isJava() && FormatTok->is(tok::string_literal)) {
    tryParseJavaTextBlock();
  if (Style.isVerilog() && !Tokens.empty() &&
      Tokens.back()->is(TT_VerilogNumberBase) &&
      FormatTok->Tok.isOneOf(tok::identifier, tok::question)) {
    FormatTok->Tok.setKind(tok::numeric_constant);
  size_t FirstNewlinePos = Text.find('\n');
  if (FirstNewlinePos == StringRef::npos) {
        Text.substr(0, FirstNewlinePos), Column, Style.TabWidth, Encoding);
  if ((Tokens.empty() || !Tokens.back()->Tok.getIdentifierInfo() ||
       Tokens.back()->Tok.getIdentifierInfo()->getPPKeywordID() !=
      it != Macros.end()) {
    FormatTok->setType(it->second);
    if (it->second == TT_IfMacro) {
  } else if (FormatTok->is(tok::identifier)) {
    if (MacroBlockBeginRegex.match(Text))
      FormatTok->setType(TT_MacroBlockBegin);
    else if (MacroBlockEndRegex.match(Text))
      FormatTok->setType(TT_MacroBlockEnd);
    else if (MacrosSkippedByRemoveParentheses.contains(Identifier))
    else if (VariableTemplates.contains(Identifier))
bool FormatTokenLexer::readRawTokenVerilogSpecific(Token &Tok) {
  const char *Start = Lex->getBufferLocation();
  if (Start[1] == '`')
  if (Start[1] == '\r' || Start[1] == '\n')
  while (Start[Len] != '\0' && Start[Len] != '\f' && Start[Len] != '\n' &&
         Start[Len] != '\r' && Start[Len] != '\t' && Start[Len] != '\v' &&
         Start[Len] != ' ') {
    if (Start[Len] == '\\' && Start[Len + 1] == '\r' &&
        Start[Len + 2] == '\n') {
    } else if (Start[Len] == '\\' &&
               (Start[Len + 1] == '\r' || Start[Len + 1] == '\n')) {
  Tok.setKind(tok::raw_identifier);
  Tok.setLocation(Lex->getSourceLocation(Start, Len));
  Tok.setRawIdentifierData(Start);
  Lex->seek(Lex->getCurrentBufferOffset() + Len, false);
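// Reads one raw token from the underlying lexer, first giving the
// Verilog-specific path above a chance to form a raw identifier, then patches
// up unterminated or language-specific literals and propagates the current
// FormattingDisabled state onto the token.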
void FormatTokenLexer::readRawToken(FormatToken &Tok) {
  if (!Style.isVerilog() || !readRawTokenVerilogSpecific(Tok.Tok))
    Lex->LexFromRawLexer(Tok.Tok);
  Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
                            Tok.Tok.getLength());
  if (Tok.is(tok::unknown)) {
    if (Tok.TokenText.starts_with("\"")) {
      Tok.Tok.setKind(tok::string_literal);
      Tok.IsUnterminatedLiteral = true;
    } else if (Style.isJavaScript() && Tok.TokenText == "''") {
      Tok.Tok.setKind(tok::string_literal);
      Tok.Tok.setKind(tok::string_literal);
    FormattingDisabled = false;
  Tok.Finalized = FormattingDisabled;
    FormattingDisabled = true;
void FormatTokenLexer::resetLexer(unsigned Offset) {
      Buffer.begin(), Buffer.begin() + Offset, Buffer.end()));
  Lex->SetKeepWhitespaceMode(true);
  TrailingWhitespace = 0;