//===--- LexerUtils.cpp - clang-tidy---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "LexerUtils.h"
#include "clang/AST/AST.h"
#include "clang/Basic/SourceManager.h"
namespace clang {
namespace tidy {
namespace utils {
namespace lexer {
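
// Lexes backwards from Location and returns the previous raw token, optionally
// skipping comment tokens; the returned token has kind tok::unknown if nothing
// could be lexed before the start of the file.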
Token getPreviousToken(SourceLocation Location, const SourceManager &SM,
                       const LangOptions &LangOpts, bool SkipComments) {
  Token Token;
  Token.setKind(tok::unknown);

  Location = Location.getLocWithOffset(-1);
  if (Location.isInvalid())
    return Token;

  auto StartOfFile = SM.getLocForStartOfFile(SM.getFileID(Location));
  while (Location != StartOfFile) {
    Location = Lexer::GetBeginningOfToken(Location, SM, LangOpts);
    if (!Lexer::getRawToken(Location, Token, SM, LangOpts) &&
        (!SkipComments || !Token.is(tok::comment))) {
      break;
    }
    Location = Location.getLocWithOffset(-1);
  }
  return Token;
}

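// Returns the beginning of the token containing the location one character
// before Start, or an invalid location if Start is invalid or inside a macro.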
SourceLocation findPreviousTokenStart(SourceLocation Start,
                                      const SourceManager &SM,
                                      const LangOptions &LangOpts) {
  if (Start.isInvalid() || Start.isMacroID())
    return SourceLocation();

  SourceLocation BeforeStart = Start.getLocWithOffset(-1);
  if (BeforeStart.isInvalid() || BeforeStart.isMacroID())
    return SourceLocation();

  return Lexer::GetBeginningOfToken(BeforeStart, SM, LangOpts);
}

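// Walks backwards from Start, one token at a time, and returns the location of
// the first token of kind TK, or an invalid location if lexing fails or a
// macro is reached first.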
SourceLocation findPreviousTokenKind(SourceLocation Start,
                                     const SourceManager &SM,
                                     const LangOptions &LangOpts,
                                     tok::TokenKind TK) {
  if (Start.isInvalid() || Start.isMacroID())
    return SourceLocation();

  while (true) {
    SourceLocation L = findPreviousTokenStart(Start, SM, LangOpts);
    if (L.isInvalid() || L.isMacroID())
      return SourceLocation();

    Token T;
    if (Lexer::getRawToken(L, T, SM, LangOpts, /*IgnoreWhiteSpace=*/true))
      return SourceLocation();

    if (T.is(TK))
      return T.getLocation();

    Start = L;
  }
}

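// Returns the location of the next comma or semicolon after Start.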
SourceLocation findNextTerminator(SourceLocation Start, const SourceManager &SM,
                                  const LangOptions &LangOpts) {
  return findNextAnyTokenKind(Start, SM, LangOpts, tok::comma, tok::semi);
}

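// Returns the token following Start, ignoring comment tokens; returns None if
// no token could be lexed.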
Optional<Token> findNextTokenSkippingComments(SourceLocation Start,
                                              const SourceManager &SM,
                                              const LangOptions &LangOpts) {
  Optional<Token> CurrentToken;
  do {
    CurrentToken = Lexer::findNextToken(Start, SM, LangOpts);
  } while (CurrentToken && CurrentToken->is(tok::comment));
  return CurrentToken;
}

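// Re-lexes Range and returns true if it contains a macro expansion, a
// preprocessor directive (a hash token), or a location that cannot be lexed.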
bool rangeContainsExpansionsOrDirectives(SourceRange Range,
                                         const SourceManager &SM,
                                         const LangOptions &LangOpts) {
  assert(Range.isValid() && "Invalid Range for relexing provided");
  SourceLocation Loc = Range.getBegin();

  while (Loc < Range.getEnd()) {
    if (Loc.isMacroID())
      return true;

    llvm::Optional<Token> Tok = Lexer::findNextToken(Loc, SM, LangOpts);
    if (!Tok)
      return true;

    if (Tok->is(tok::hash))
      return true;

    Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts).getLocWithOffset(1);
  }

  return false;
}

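// Re-lexes Range and returns the last qualifier token of kind TK (const,
// volatile, or restrict) it finds, preferring a match that appears after a '<'
// with no matching '>' over one that appears before it; returns None if no
// such token exists in the range.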
llvm::Optional<Token> getQualifyingToken(tok::TokenKind TK,
                                         CharSourceRange Range,
                                         const ASTContext &Context,
                                         const SourceManager &SM) {
  assert((TK == tok::kw_const || TK == tok::kw_volatile ||
          TK == tok::kw_restrict) &&
         "TK is not a qualifier keyword");
  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Range.getBegin());
  StringRef File = SM.getBufferData(LocInfo.first);
  Lexer RawLexer(SM.getLocForStartOfFile(LocInfo.first), Context.getLangOpts(),
                 File.begin(), File.data() + LocInfo.second, File.end());

  llvm::Optional<Token> LastMatchBeforeTemplate;
  llvm::Optional<Token> LastMatchAfterTemplate;
  bool SawTemplate = false;
  Token Tok;
  while (!RawLexer.LexFromRawLexer(Tok) &&
         Range.getEnd() != Tok.getLocation() &&
         !SM.isBeforeInTranslationUnit(Range.getEnd(), Tok.getLocation())) {
    if (Tok.is(tok::raw_identifier)) {
      IdentifierInfo &Info = Context.Idents.get(
          StringRef(SM.getCharacterData(Tok.getLocation()), Tok.getLength()));
      Tok.setIdentifierInfo(&Info);
      Tok.setKind(Info.getTokenID());
    }
    if (Tok.is(tok::less))
      SawTemplate = true;
    else if (Tok.isOneOf(tok::greater, tok::greatergreater))
      LastMatchAfterTemplate = None;
    else if (Tok.is(TK)) {
      if (SawTemplate)
        LastMatchAfterTemplate = Tok;
      else
        LastMatchBeforeTemplate = Tok;
    }
  }
  return LastMatchAfterTemplate != None ? LastMatchAfterTemplate
                                        : LastMatchBeforeTemplate;
}

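// Helpers for getUnifiedEndLoc: statements matched by breakAndReturnEnd
// already end at their own end location, while statements matched by
// breakAndReturnEndPlus1Token are followed by a terminating token (typically
// a semicolon) that is not part of their source range.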
static bool breakAndReturnEnd(const Stmt &S) {
  return isa<CompoundStmt, DeclStmt, NullStmt>(S);
}

static bool breakAndReturnEndPlus1Token(const Stmt &S) {
  return isa<Expr, DoStmt, ReturnStmt, BreakStmt, ContinueStmt, GotoStmt,
             SEHLeaveStmt>(S);
}

// Given a Stmt which does not include its semicolon, this method returns the
// SourceLocation of the semicolon.
static SourceLocation getSemicolonAfterStmtEndLoc(const SourceLocation &EndLoc,
                                                  const SourceManager &SM,
                                                  const LangOptions &LangOpts) {
  if (EndLoc.isMacroID()) {
    // Assuming EndLoc points to a function call foo within macro F.
    // This method is supposed to return the location of the semicolon within
    // those macro arguments:
    //  F     (      foo()        ;   )
    //  ^ EndLoc     ^ SpellingLoc ^ next token of SpellingLoc
    const SourceLocation SpellingLoc = SM.getSpellingLoc(EndLoc);
    Optional<Token> NextTok =
        findNextTokenSkippingComments(SpellingLoc, SM, LangOpts);

    // Was the next token found successfully?
    // All macro issues are simply resolved by ensuring it's a semicolon.
    if (NextTok && NextTok->is(tok::TokenKind::semi)) {
      // Ideally this would return `F` with spelling location `;` (NextTok)
      // following the example above. For now simply return NextTok location.
      return NextTok->getLocation();
    }

    // Fallthrough to 'normal handling'.
    //  F     (      foo()        )     ;
    //  ^ EndLoc     ^ SpellingLoc )    ^ next token of EndLoc
  }

  Optional<Token> NextTok = findNextTokenSkippingComments(EndLoc, SM, LangOpts);

  // Testing for semicolon again avoids some issues with macros.
  if (NextTok && NextTok->is(tok::TokenKind::semi))
    return NextTok->getLocation();

  return SourceLocation();
}

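// Returns the end location of S, extended to the location of the trailing
// semicolon when S's innermost trailing statement does not include its own
// terminator.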
SourceLocation getUnifiedEndLoc(const Stmt &S, const SourceManager &SM,
                                const LangOptions &LangOpts) {
  const Stmt *LastChild = &S;
  while (!LastChild->children().empty() && !breakAndReturnEnd(*LastChild) &&
         !breakAndReturnEndPlus1Token(*LastChild)) {
    for (const Stmt *Child : LastChild->children())
      LastChild = Child;
  }

  if (!breakAndReturnEnd(*LastChild) &&
      breakAndReturnEndPlus1Token(*LastChild))
    return getSemicolonAfterStmtEndLoc(S.getEndLoc(), SM, LangOpts);

  return S.getEndLoc();
}

} // namespace lexer
} // namespace utils
} // namespace tidy
} // namespace clang