Mirror of https://github.com/renovatebot/renovate.git
refactor(managers): Use moo fallback for unknown fragments (#9870)
commit b974ba0abb
parent 03e1ade35f
5 changed files with 27 additions and 49 deletions
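moo's `fallback` token type collects each run of characters that no other rule matches into a single token, instead of emitting one token per character via a catch-all like /[^]/. Fallback tokens may also span line breaks, so `lineBreaks: true` becomes unnecessary. A minimal sketch of the behavior, assuming only the moo package itself — the rule names below are illustrative and are not taken from the changed files:

import moo from 'moo';

// One explicit rule plus a fallback: any input the `keyword` rule does not
// match is emitted as a single `unknown` token per unmatched run.
const lexer = moo.compile({
  keyword: 'dependency',
  unknown: moo.fallback,
});

lexer.reset('foo\nbar dependency baz');
for (const token of lexer) {
  console.log(token.type, JSON.stringify(token.value));
}
// unknown "foo\nbar "   <- one token, line break included
// keyword "dependency"
// unknown " baz"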
@@ -85,23 +85,23 @@ const lexer = moo.states({
         ].join('|')
       ),
     },
-    unknown: { match: /[^]/, lineBreaks: true },
+    unknown: moo.fallback,
   },
   longDoubleQuoted: {
     stringFinish: { match: '"""', pop: 1 },
-    char: { match: /[^]/, lineBreaks: true },
+    char: moo.fallback,
   },
   doubleQuoted: {
     stringFinish: { match: '"', pop: 1 },
-    char: { match: /[^]/, lineBreaks: true },
+    char: moo.fallback,
   },
   longSingleQuoted: {
     stringFinish: { match: "'''", pop: 1 },
-    char: { match: /[^]/, lineBreaks: true },
+    char: moo.fallback,
   },
   singleQuoted: {
     stringFinish: { match: "'", pop: 1 },
-    char: { match: /[^]/, lineBreaks: true },
+    char: moo.fallback,
   },
 });
@@ -21,7 +21,7 @@ const lexer = moo.states({
       match: /^#(?:addin|tool|module)\s+"(?:nuget|dotnet):[^"]+"\s*$/,
       value: (s: string) => s.trim().slice(1, -1),
     },
-    unknown: { match: /[^]/, lineBreaks: true },
+    unknown: moo.fallback,
   },
 });
@@ -56,11 +56,10 @@ export enum TokenType {
   TripleDoubleQuotedStart = 'tripleDoubleQuotedStart',
   TripleQuotedFinish = 'tripleQuotedFinish',
 
-  Char = 'char',
+  Chars = 'chars',
   EscapedChar = 'escapedChar',
   String = 'string',
 
-  UnknownLexeme = 'unknownChar',
   UnknownFragment = 'unknownFragment',
 }
@@ -35,7 +35,7 @@ describe(getName(), () => {
       TokenType.RightBrace,
       TokenType.RightBrace,
     ],
-    '@': [TokenType.UnknownLexeme],
+    '@': [TokenType.UnknownFragment],
     "'\\''": [
       TokenType.SingleQuotedStart,
       TokenType.EscapedChar,
@@ -54,23 +54,22 @@ describe(getName(), () => {
     ],
     "'x'": [
       TokenType.SingleQuotedStart,
-      TokenType.Char,
+      TokenType.Chars,
       TokenType.SingleQuotedFinish,
     ],
     "'\n'": [
       TokenType.SingleQuotedStart,
-      TokenType.Char,
+      TokenType.Chars,
       TokenType.SingleQuotedFinish,
     ],
     "'$x'": [
       TokenType.SingleQuotedStart,
-      TokenType.Char,
-      TokenType.Char,
+      TokenType.Chars,
       TokenType.SingleQuotedFinish,
     ],
     "''''''": ['tripleQuotedStart', 'tripleQuotedFinish'],
-    "'''x'''": ['tripleQuotedStart', TokenType.Char, 'tripleQuotedFinish'],
-    "'''\n'''": ['tripleQuotedStart', TokenType.Char, 'tripleQuotedFinish'],
+    "'''x'''": ['tripleQuotedStart', TokenType.Chars, 'tripleQuotedFinish'],
+    "'''\n'''": ['tripleQuotedStart', TokenType.Chars, 'tripleQuotedFinish'],
     "'''\\''''": [
       'tripleQuotedStart',
       TokenType.EscapedChar,
@@ -106,12 +105,12 @@ describe(getName(), () => {
     ],
     '"x"': [
       TokenType.DoubleQuotedStart,
-      TokenType.Char,
+      TokenType.Chars,
       TokenType.DoubleQuotedFinish,
     ],
     '"\n"': [
       TokenType.DoubleQuotedStart,
-      TokenType.Char,
+      TokenType.Chars,
       TokenType.DoubleQuotedFinish,
     ],
     // eslint-disable-next-line no-template-curly-in-string
@@ -130,9 +129,7 @@ describe(getName(), () => {
     '"${x()}"': [
       TokenType.DoubleQuotedStart,
       TokenType.IgnoredInterpolationStart,
-      TokenType.UnknownLexeme,
-      TokenType.UnknownLexeme,
-      TokenType.UnknownLexeme,
+      TokenType.UnknownFragment,
       TokenType.RightBrace,
       TokenType.DoubleQuotedFinish,
     ],
@@ -140,7 +137,7 @@ describe(getName(), () => {
     '"${x{}}"': [
       TokenType.DoubleQuotedStart,
       TokenType.IgnoredInterpolationStart,
-      TokenType.UnknownLexeme,
+      TokenType.UnknownFragment,
       TokenType.LeftBrace,
       TokenType.RightBrace,
       TokenType.RightBrace,
@@ -55,26 +55,26 @@ const lexer = moo.states({
       match: '"',
       push: TokenType.DoubleQuotedStart,
     },
-    [TokenType.UnknownLexeme]: { match: /./ },
+    [TokenType.UnknownFragment]: moo.fallback,
   },
 
   // Tokenize triple-quoted string literal characters
   [TokenType.TripleSingleQuotedStart]: {
     ...escapedChars,
     [TokenType.TripleQuotedFinish]: { match: "'''", pop: 1 },
-    [TokenType.Char]: { match: /[^]/, lineBreaks: true },
+    [TokenType.Chars]: moo.fallback,
   },
   [TokenType.TripleDoubleQuotedStart]: {
     ...escapedChars,
     [TokenType.TripleQuotedFinish]: { match: '"""', pop: 1 },
-    [TokenType.Char]: { match: /[^]/, lineBreaks: true },
+    [TokenType.Chars]: moo.fallback,
   },
 
   // Tokenize single-quoted string literal characters
   [TokenType.SingleQuotedStart]: {
     ...escapedChars,
     [TokenType.SingleQuotedFinish]: { match: "'", pop: 1 },
-    [TokenType.Char]: { match: /[^]/, lineBreaks: true },
+    [TokenType.Chars]: moo.fallback,
   },
 
   // Tokenize double-quoted string literal chars and interpolations
@@ -91,7 +91,7 @@ const lexer = moo.states({
       match: /\${/,
       push: TokenType.IgnoredInterpolationStart,
     },
-    [TokenType.Char]: { match: /[^]/, lineBreaks: true },
+    [TokenType.Chars]: moo.fallback,
   },
 
   // Ignore interpolation of complex expressions
@@ -102,34 +102,17 @@ const lexer = moo.states({
       push: TokenType.IgnoredInterpolationStart,
     },
     [TokenType.RightBrace]: { match: '}', pop: 1 },
-    [TokenType.UnknownLexeme]: { match: /[^]/, lineBreaks: true },
+    [TokenType.UnknownFragment]: moo.fallback,
   },
 });
 
-/*
-  Turn UnknownLexeme chars to UnknownFragment strings
-*/
-function processUnknownLexeme(acc: Token[], token: Token): Token[] {
-  if (token.type === TokenType.UnknownLexeme) {
-    const prevToken: Token = acc[acc.length - 1];
-    if (prevToken?.type === TokenType.UnknownFragment) {
-      prevToken.value += token.value;
-    } else {
-      acc.push({ ...token, type: TokenType.UnknownFragment });
-    }
-  } else {
-    acc.push(token);
-  }
-  return acc;
-}
-
 //
-// Turn separated chars of string literal to single String token
+// Turn substrings of chars and escaped chars into single String token
 //
-function processChar(acc: Token[], token: Token): Token[] {
+function processChars(acc: Token[], token: Token): Token[] {
   const tokenType = token.type;
   const prevToken: Token = acc[acc.length - 1];
-  if ([TokenType.Char, TokenType.EscapedChar].includes(tokenType)) {
+  if ([TokenType.Chars, TokenType.EscapedChar].includes(tokenType)) {
     if (prevToken?.type === TokenType.String) {
       prevToken.value += token.value;
     } else {
@@ -221,8 +204,7 @@ export function extractRawTokens(input: string): Token[] {
 
 export function processTokens(tokens: Token[]): Token[] {
   return tokens
-    .reduce(processUnknownLexeme, [])
-    .reduce(processChar, [])
+    .reduce(processChars, [])
     .reduce(processInterpolation, [])
     .filter(filterTokens);
 }
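Because a fallback token already covers a whole unmatched run, the per-character merge pass (processUnknownLexeme) above becomes dead code and processTokens shrinks to a single reduce over Chars/EscapedChar tokens. A minimal sketch of the same push/pop-plus-fallback pattern in isolation, again assuming only the moo package — the state and rule names here are illustrative, not the ones from the diff:

import moo from 'moo';

// Two states: '"' pushes into the string state, '"' inside it pops back out.
// In each state, everything between explicit matches becomes one fallback token.
const lexer = moo.states({
  main: {
    stringStart: { match: '"', push: 'str' },
    unknown: moo.fallback,
  },
  str: {
    stringFinish: { match: '"', pop: 1 },
    chars: moo.fallback,
  },
});

lexer.reset('foo "bar\nbaz"');
for (const token of lexer) {
  console.log(token.type, JSON.stringify(token.value));
}
// unknown "foo "
// stringStart "\""
// chars "bar\nbaz"   <- a single token, line break included
// stringFinish "\""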