commit
96a7343142
@ -14,12 +14,16 @@ module.exports = function(api) {
|
||||
let convertESM = true;
|
||||
let ignoreLib = true;
|
||||
let includeRuntime = false;
|
||||
const nodeVersion = "6.9";
|
||||
|
||||
switch (env) {
|
||||
// Configs used during bundling builds.
|
||||
case "babel-parser":
|
||||
convertESM = false;
|
||||
ignoreLib = false;
|
||||
envOpts.targets = {
|
||||
node: nodeVersion,
|
||||
};
|
||||
break;
|
||||
case "standalone":
|
||||
convertESM = false;
|
||||
@ -29,7 +33,7 @@ module.exports = function(api) {
|
||||
case "production":
|
||||
// Config during builds before publish.
|
||||
envOpts.targets = {
|
||||
node: "6.9",
|
||||
node: nodeVersion,
|
||||
};
|
||||
break;
|
||||
case "development":
|
||||
|
||||
@ -30,7 +30,7 @@
|
||||
"devDependencies": {
|
||||
"@babel/helper-fixtures": "^7.2.0",
|
||||
"charcodes": "0.1.0",
|
||||
"unicode-11.0.0": "^0.7.7"
|
||||
"unicode-11.0.0": "^0.7.8"
|
||||
},
|
||||
"bin": {
|
||||
"parser": "./bin/babel-parser.js"
|
||||
|
||||
@ -60,11 +60,15 @@ function generate(chars) {
|
||||
const startData = generate(start);
|
||||
const contData = generate(cont);
|
||||
|
||||
console.log("/* prettier-ignore */");
|
||||
console.log('let nonASCIIidentifierStartChars = "' + startData.nonASCII + '";');
|
||||
console.log("/* prettier-ignore */");
|
||||
console.log('let nonASCIIidentifierChars = "' + contData.nonASCII + '";');
|
||||
console.log("/* prettier-ignore */");
|
||||
console.log(
|
||||
"const astralIdentifierStartCodes = " + JSON.stringify(startData.astral) + ";"
|
||||
);
|
||||
console.log("/* prettier-ignore */");
|
||||
console.log(
|
||||
"const astralIdentifierCodes = " + JSON.stringify(contData.astral) + ";"
|
||||
);
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
// @flow
|
||||
|
||||
import type { Options } from "../options";
|
||||
import { reservedWords } from "../util/identifier";
|
||||
import { isES2015ReservedWord } from "../util/identifier";
|
||||
|
||||
import type State from "../tokenizer/state";
|
||||
import type { PluginsMap } from "./index";
|
||||
@ -16,21 +16,21 @@ export default class BaseParser {
|
||||
|
||||
// Initialized by Tokenizer
|
||||
state: State;
|
||||
input: string;
|
||||
|
||||
isReservedWord(word: string): boolean {
|
||||
if (word === "await") {
|
||||
return this.inModule;
|
||||
} else {
|
||||
return reservedWords[6](word);
|
||||
return isES2015ReservedWord(word);
|
||||
}
|
||||
}
|
||||
|
||||
hasPlugin(name: string): boolean {
|
||||
return Object.hasOwnProperty.call(this.plugins, name);
|
||||
return this.plugins.has(name);
|
||||
}
|
||||
|
||||
getPluginOption(plugin: string, name: string) {
|
||||
if (this.hasPlugin(plugin)) return this.plugins[plugin][name];
|
||||
// $FlowIssue
|
||||
if (this.hasPlugin(plugin)) return this.plugins.get(plugin)[name];
|
||||
}
|
||||
}
|
||||
|
||||
@ -21,7 +21,11 @@
|
||||
import { types as tt, type TokenType } from "../tokenizer/types";
|
||||
import * as N from "../types";
|
||||
import LValParser from "./lval";
|
||||
import { reservedWords } from "../util/identifier";
|
||||
import {
|
||||
isStrictReservedWord,
|
||||
isStrictBindReservedWord,
|
||||
isKeyword,
|
||||
} from "../util/identifier";
|
||||
import type { Pos, Position } from "../util/location";
|
||||
import * as charCodes from "charcodes";
|
||||
|
||||
@ -525,7 +529,7 @@ export default class ExpressionParser extends LValParser {
|
||||
} else if (this.match(tt.questionDot)) {
|
||||
this.expectPlugin("optionalChaining");
|
||||
state.optionalChainMember = true;
|
||||
if (noCalls && this.lookahead().type == tt.parenL) {
|
||||
if (noCalls && this.lookahead().type === tt.parenL) {
|
||||
state.stop = true;
|
||||
return base;
|
||||
}
|
||||
@ -869,9 +873,9 @@ export default class ExpressionParser extends LValParser {
|
||||
return this.parseFunction(node, false, false, true);
|
||||
} else if (
|
||||
canBeArrow &&
|
||||
!this.canInsertSemicolon() &&
|
||||
id.name === "async" &&
|
||||
this.match(tt.name)
|
||||
this.match(tt.name) &&
|
||||
!this.canInsertSemicolon()
|
||||
) {
|
||||
const oldYOAIPAP = this.state.yieldOrAwaitInPossibleArrowParameters;
|
||||
const oldInAsync = this.state.inAsync;
|
||||
@ -886,7 +890,8 @@ export default class ExpressionParser extends LValParser {
|
||||
return node;
|
||||
}
|
||||
|
||||
if (canBeArrow && !this.canInsertSemicolon() && this.eat(tt.arrow)) {
|
||||
if (canBeArrow && this.match(tt.arrow) && !this.canInsertSemicolon()) {
|
||||
this.next();
|
||||
const oldYOAIPAP = this.state.yieldOrAwaitInPossibleArrowParameters;
|
||||
this.state.yieldOrAwaitInPossibleArrowParameters = null;
|
||||
this.parseArrowExpression(node, [id]);
|
||||
@ -1146,7 +1151,11 @@ export default class ExpressionParser extends LValParser {
|
||||
|
||||
const node = this.startNodeAt(startPos, startLoc);
|
||||
this.addExtra(node, "rawValue", value);
|
||||
this.addExtra(node, "raw", this.input.slice(startPos, this.state.end));
|
||||
this.addExtra(
|
||||
node,
|
||||
"raw",
|
||||
this.state.input.slice(startPos, this.state.end),
|
||||
);
|
||||
node.value = value;
|
||||
this.next();
|
||||
return this.finishNode(node, type);
|
||||
@ -1365,7 +1374,7 @@ export default class ExpressionParser extends LValParser {
|
||||
}
|
||||
}
|
||||
elem.value = {
|
||||
raw: this.input
|
||||
raw: this.state.input
|
||||
.slice(this.state.start, this.state.end)
|
||||
.replace(/\r\n?/g, "\n"),
|
||||
cooked: this.state.value,
|
||||
@ -1967,7 +1976,8 @@ export default class ExpressionParser extends LValParser {
|
||||
if (
|
||||
(name === "class" || name === "function") &&
|
||||
(this.state.lastTokEnd !== this.state.lastTokStart + 1 ||
|
||||
this.input.charCodeAt(this.state.lastTokStart) !== charCodes.dot)
|
||||
this.state.input.charCodeAt(this.state.lastTokStart) !==
|
||||
charCodes.dot)
|
||||
) {
|
||||
this.state.context.pop();
|
||||
}
|
||||
@ -1991,8 +2001,8 @@ export default class ExpressionParser extends LValParser {
|
||||
): void {
|
||||
if (
|
||||
this.state.strict &&
|
||||
(reservedWords.strict(word) ||
|
||||
(isBinding && reservedWords.strictBind(word)))
|
||||
(isStrictReservedWord(word) ||
|
||||
(isBinding && isStrictBindReservedWord(word)))
|
||||
) {
|
||||
this.raise(startLoc, word + " is a reserved word in strict mode");
|
||||
}
|
||||
@ -2011,7 +2021,7 @@ export default class ExpressionParser extends LValParser {
|
||||
);
|
||||
}
|
||||
|
||||
if (this.isReservedWord(word) || (checkKeywords && this.isKeyword(word))) {
|
||||
if (this.isReservedWord(word) || (checkKeywords && isKeyword(word))) {
|
||||
this.raise(startLoc, word + " is a reserved word");
|
||||
}
|
||||
}
|
||||
@ -2071,8 +2081,8 @@ export default class ExpressionParser extends LValParser {
|
||||
this.next();
|
||||
if (
|
||||
this.match(tt.semi) ||
|
||||
this.canInsertSemicolon() ||
|
||||
(!this.match(tt.star) && !this.state.type.startsExpr)
|
||||
(!this.match(tt.star) && !this.state.type.startsExpr) ||
|
||||
this.canInsertSemicolon()
|
||||
) {
|
||||
node.delegate = false;
|
||||
node.argument = null;
|
||||
|
||||
@ -6,9 +6,7 @@ import type { PluginList } from "../plugin-utils";
|
||||
import { getOptions } from "../options";
|
||||
import StatementParser from "./statement";
|
||||
|
||||
export type PluginsMap = {
|
||||
[key: string]: { [option: string]: any },
|
||||
};
|
||||
export type PluginsMap = Map<string, { [string]: any }>;
|
||||
|
||||
export default class Parser extends StatementParser {
|
||||
// Forward-declaration so typescript plugin can override jsx plugin
|
||||
@ -22,7 +20,6 @@ export default class Parser extends StatementParser {
|
||||
|
||||
this.options = options;
|
||||
this.inModule = this.options.sourceType === "module";
|
||||
this.input = input;
|
||||
this.plugins = pluginsMap(this.options.plugins);
|
||||
this.filename = options.sourceFilename;
|
||||
}
|
||||
@ -36,10 +33,10 @@ export default class Parser extends StatementParser {
|
||||
}
|
||||
|
||||
function pluginsMap(plugins: PluginList): PluginsMap {
|
||||
const pluginMap: PluginsMap = (Object.create(null): Object);
|
||||
const pluginMap: PluginsMap = new Map();
|
||||
for (const plugin of plugins) {
|
||||
const [name, options = {}] = Array.isArray(plugin) ? plugin : [plugin, {}];
|
||||
if (!pluginMap[name]) pluginMap[name] = options || {};
|
||||
const [name, options] = Array.isArray(plugin) ? plugin : [plugin, {}];
|
||||
if (!pluginMap.has(name)) pluginMap.set(name, options || {});
|
||||
}
|
||||
return pluginMap;
|
||||
}
|
||||
|
||||
@ -21,7 +21,7 @@ export default class LocationParser extends CommentsParser {
|
||||
code?: string,
|
||||
} = {},
|
||||
): empty {
|
||||
const loc = getLineInfo(this.input, pos);
|
||||
const loc = getLineInfo(this.state.input, pos);
|
||||
message += ` (${loc.line}:${loc.column})`;
|
||||
// $FlowIgnore
|
||||
const err: SyntaxError & { pos: number, loc: Position } = new SyntaxError(
|
||||
|
||||
@ -56,9 +56,13 @@ export default class LValParser extends NodeUtils {
|
||||
|
||||
case "ObjectExpression":
|
||||
node.type = "ObjectPattern";
|
||||
for (let index = 0; index < node.properties.length; index++) {
|
||||
const prop = node.properties[index];
|
||||
const isLast = index === node.properties.length - 1;
|
||||
for (
|
||||
let i = 0, length = node.properties.length, last = length - 1;
|
||||
i < length;
|
||||
i++
|
||||
) {
|
||||
const prop = node.properties[i];
|
||||
const isLast = i === last;
|
||||
this.toAssignableObjectExpressionProp(prop, isBinding, isLast);
|
||||
}
|
||||
break;
|
||||
@ -145,12 +149,10 @@ export default class LValParser extends NodeUtils {
|
||||
const arg = last.argument;
|
||||
this.toAssignable(arg, isBinding, contextDescription);
|
||||
if (
|
||||
[
|
||||
"Identifier",
|
||||
"MemberExpression",
|
||||
"ArrayPattern",
|
||||
"ObjectPattern",
|
||||
].indexOf(arg.type) === -1
|
||||
arg.type !== "Identifier" &&
|
||||
arg.type !== "MemberExpression" &&
|
||||
arg.type !== "ArrayPattern" &&
|
||||
arg.type !== "ObjectPattern"
|
||||
) {
|
||||
this.unexpected(arg.start);
|
||||
}
|
||||
@ -426,14 +428,13 @@ export default class LValParser extends NodeUtils {
|
||||
}
|
||||
|
||||
checkToRestConversion(node: SpreadElement): void {
|
||||
const validArgumentTypes = ["Identifier", "MemberExpression"];
|
||||
|
||||
if (validArgumentTypes.indexOf(node.argument.type) !== -1) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (
|
||||
node.argument.type !== "Identifier" &&
|
||||
node.argument.type !== "MemberExpression"
|
||||
) {
|
||||
this.raise(node.argument.start, "Invalid rest operator's argument");
|
||||
}
|
||||
}
|
||||
|
||||
checkCommaAfterRest(close: TokenType, kind: string): void {
|
||||
if (this.match(tt.comma)) {
|
||||
|
||||
@ -7,8 +7,6 @@ import type { Comment, Node as NodeType, NodeBase } from "../types";
|
||||
|
||||
// Start an AST node, attaching a start offset.
|
||||
|
||||
const commentKeys = ["leadingComments", "trailingComments", "innerComments"];
|
||||
|
||||
class Node implements NodeBase {
|
||||
constructor(parser: Parser, pos: number, loc: Position) {
|
||||
this.type = "";
|
||||
@ -31,16 +29,22 @@ class Node implements NodeBase {
|
||||
|
||||
__clone(): this {
|
||||
// $FlowIgnore
|
||||
const node2: any = new Node();
|
||||
Object.keys(this).forEach(key => {
|
||||
const newNode: any = new Node();
|
||||
const keys = Object.keys(this);
|
||||
for (let i = 0, length = keys.length; i < length; i++) {
|
||||
const key = keys[i];
|
||||
// Do not clone comments that are already attached to the node
|
||||
if (commentKeys.indexOf(key) < 0) {
|
||||
if (
|
||||
key !== "leadingComments" &&
|
||||
key !== "trailingComments" &&
|
||||
key !== "innerComments"
|
||||
) {
|
||||
// $FlowIgnore
|
||||
node2[key] = this[key];
|
||||
newNode[key] = this[key];
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
return node2;
|
||||
return newNode;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -44,7 +44,7 @@ export default class StatementParser extends ExpressionParser {
|
||||
const directiveLiteral = this.startNodeAt(expr.start, expr.loc.start);
|
||||
const directive = this.startNodeAt(stmt.start, stmt.loc.start);
|
||||
|
||||
const raw = this.input.slice(expr.start, expr.end);
|
||||
const raw = this.state.input.slice(expr.start, expr.end);
|
||||
const val = (directiveLiteral.value = raw.slice(1, -1)); // remove quotes
|
||||
|
||||
this.addExtra(directiveLiteral, "raw", raw);
|
||||
@ -161,7 +161,7 @@ export default class StatementParser extends ExpressionParser {
|
||||
this.next();
|
||||
|
||||
let result;
|
||||
if (starttype == tt._import) {
|
||||
if (starttype === tt._import) {
|
||||
result = this.parseImport(node);
|
||||
|
||||
if (
|
||||
@ -551,7 +551,9 @@ export default class StatementParser extends ExpressionParser {
|
||||
parseThrowStatement(node: N.ThrowStatement): N.ThrowStatement {
|
||||
this.next();
|
||||
if (
|
||||
lineBreak.test(this.input.slice(this.state.lastTokEnd, this.state.start))
|
||||
lineBreak.test(
|
||||
this.state.input.slice(this.state.lastTokEnd, this.state.start),
|
||||
)
|
||||
) {
|
||||
this.raise(this.state.lastTokEnd, "Illegal newline after throw");
|
||||
}
|
||||
@ -691,9 +693,9 @@ export default class StatementParser extends ExpressionParser {
|
||||
node.body = this.parseStatement(declaration);
|
||||
|
||||
if (
|
||||
node.body.type == "ClassDeclaration" ||
|
||||
(node.body.type == "VariableDeclaration" && node.body.kind !== "var") ||
|
||||
(node.body.type == "FunctionDeclaration" &&
|
||||
node.body.type === "ClassDeclaration" ||
|
||||
(node.body.type === "VariableDeclaration" && node.body.kind !== "var") ||
|
||||
(node.body.type === "FunctionDeclaration" &&
|
||||
(this.state.strict || node.body.generator || node.body.async))
|
||||
) {
|
||||
this.raise(node.body.start, "Invalid labeled declaration");
|
||||
@ -863,6 +865,7 @@ export default class StatementParser extends ExpressionParser {
|
||||
kind: TokenType,
|
||||
): N.VariableDeclaration {
|
||||
const declarations = (node.declarations = []);
|
||||
const isTypescript = this.hasPlugin("typescript");
|
||||
// $FlowFixMe
|
||||
node.kind = kind.keyword;
|
||||
for (;;) {
|
||||
@ -877,7 +880,7 @@ export default class StatementParser extends ExpressionParser {
|
||||
) {
|
||||
// `const` with no initializer is allowed in TypeScript.
|
||||
// It could be a declaration like `const x: number;`.
|
||||
if (!this.hasPlugin("typescript")) {
|
||||
if (!isTypescript) {
|
||||
this.unexpected();
|
||||
}
|
||||
} else if (
|
||||
@ -1278,7 +1281,7 @@ export default class StatementParser extends ExpressionParser {
|
||||
} else if (
|
||||
isSimple &&
|
||||
(key.name === "get" || key.name === "set") &&
|
||||
!(this.isLineTerminator() && this.match(tt.star))
|
||||
!(this.match(tt.star) && this.isLineTerminator())
|
||||
) {
|
||||
// `get\n*` is an uninitialized property named 'get' followed by a generator.
|
||||
// a getter or setter
|
||||
@ -1521,7 +1524,7 @@ export default class StatementParser extends ExpressionParser {
|
||||
isAsyncFunction() {
|
||||
if (!this.isContextual("async")) return false;
|
||||
|
||||
const { input, pos } = this.state;
|
||||
const { input, pos, length } = this.state;
|
||||
|
||||
skipWhiteSpace.lastIndex = pos;
|
||||
const skip = skipWhiteSpace.exec(input);
|
||||
@ -1533,7 +1536,7 @@ export default class StatementParser extends ExpressionParser {
|
||||
return (
|
||||
!lineBreak.test(input.slice(pos, next)) &&
|
||||
input.slice(next, next + 8) === "function" &&
|
||||
(next + 8 === input.length || !isIdentifierChar(input.charAt(next + 8)))
|
||||
(next + 8 === length || !isIdentifierChar(input.charCodeAt(next + 8)))
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@ -25,7 +25,7 @@ export default class UtilParser extends Tokenizer {
|
||||
|
||||
isLookaheadRelational(op: "<" | ">"): boolean {
|
||||
const l = this.lookahead();
|
||||
return l.type == tt.relational && l.value == op;
|
||||
return l.type === tt.relational && l.value === op;
|
||||
}
|
||||
|
||||
// TODO
|
||||
@ -87,7 +87,7 @@ export default class UtilParser extends Tokenizer {
|
||||
|
||||
hasPrecedingLineBreak(): boolean {
|
||||
return lineBreak.test(
|
||||
this.input.slice(this.state.lastTokEnd, this.state.start),
|
||||
this.state.input.slice(this.state.lastTokEnd, this.state.start),
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@ -1124,7 +1124,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
node.types = [];
|
||||
this.expect(tt.bracketL);
|
||||
// We allow trailing commas
|
||||
while (this.state.pos < this.input.length && !this.match(tt.bracketR)) {
|
||||
while (this.state.pos < this.state.length && !this.match(tt.bracketR)) {
|
||||
node.types.push(this.flowParseType());
|
||||
if (this.match(tt.bracketR)) break;
|
||||
this.expect(tt.comma);
|
||||
@ -1190,9 +1190,6 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
case "any":
|
||||
return this.finishNode(node, "AnyTypeAnnotation");
|
||||
|
||||
case "void":
|
||||
return this.finishNode(node, "VoidTypeAnnotation");
|
||||
|
||||
case "bool":
|
||||
case "boolean":
|
||||
return this.finishNode(node, "BooleanTypeAnnotation");
|
||||
@ -1369,6 +1366,10 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
"NumberLiteralTypeAnnotation",
|
||||
);
|
||||
|
||||
case tt._void:
|
||||
this.next();
|
||||
return this.finishNode(node, "VoidTypeAnnotation");
|
||||
|
||||
case tt._null:
|
||||
this.next();
|
||||
return this.finishNode(node, "NullLiteralTypeAnnotation");
|
||||
@ -1398,7 +1399,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
const startPos = this.state.start,
|
||||
startLoc = this.state.startLoc;
|
||||
let type = this.flowParsePrimaryType();
|
||||
while (!this.canInsertSemicolon() && this.match(tt.bracketL)) {
|
||||
while (this.match(tt.bracketL) && !this.canInsertSemicolon()) {
|
||||
const node = this.startNodeAt(startPos, startLoc);
|
||||
node.elementType = type;
|
||||
this.expect(tt.bracketL);
|
||||
@ -1630,7 +1631,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
this.match(tt.name) &&
|
||||
(this.state.value === "type" ||
|
||||
this.state.value === "interface" ||
|
||||
this.state.value == "opaque")
|
||||
this.state.value === "opaque")
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
@ -1922,20 +1923,12 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
}
|
||||
|
||||
// don't consider `void` to be a keyword as then it'll use the void token type
|
||||
// and set startExpr
|
||||
isKeyword(name: string): boolean {
|
||||
if (this.state.inType && name === "void") {
|
||||
return false;
|
||||
} else {
|
||||
return super.isKeyword(name);
|
||||
}
|
||||
}
|
||||
|
||||
// ensure that inside flow types, we bypass the jsx parser plugin
|
||||
readToken(code: number): void {
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
if (
|
||||
getTokenFromCode(code: number): void {
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (code === charCodes.leftCurlyBrace && next === charCodes.verticalBar) {
|
||||
return this.finishOp(tt.braceBarL, 2);
|
||||
} else if (
|
||||
this.state.inType &&
|
||||
(code === charCodes.greaterThan || code === charCodes.lessThan)
|
||||
) {
|
||||
@ -1944,7 +1937,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
this.state.isIterator = true;
|
||||
return super.readWord();
|
||||
} else {
|
||||
return super.readToken(code);
|
||||
return super.getTokenFromCode(code);
|
||||
}
|
||||
}
|
||||
|
||||
@ -2686,7 +2679,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
|
||||
readToken_mult_modulo(code: number): void {
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (
|
||||
code === charCodes.asterisk &&
|
||||
next === charCodes.slash &&
|
||||
@ -2701,6 +2694,20 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
super.readToken_mult_modulo(code);
|
||||
}
|
||||
|
||||
readToken_pipe_amp(code: number): void {
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (
|
||||
code === charCodes.verticalBar &&
|
||||
next === charCodes.rightCurlyBrace
|
||||
) {
|
||||
// '|}'
|
||||
this.finishOp(tt.braceBarR, 2);
|
||||
return;
|
||||
}
|
||||
|
||||
super.readToken_pipe_amp(code);
|
||||
}
|
||||
|
||||
parseTopLevel(file: N.File, program: N.Program): N.File {
|
||||
const fileNode = super.parseTopLevel(file, program);
|
||||
if (this.state.hasFlowComment) {
|
||||
@ -2710,11 +2717,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
|
||||
skipBlockComment(): void {
|
||||
if (
|
||||
this.hasPlugin("flow") &&
|
||||
this.hasPlugin("flowComments") &&
|
||||
this.skipFlowComment()
|
||||
) {
|
||||
if (this.hasPlugin("flowComments") && this.skipFlowComment()) {
|
||||
if (this.state.hasFlowComment) {
|
||||
this.unexpected(
|
||||
null,
|
||||
@ -2727,8 +2730,8 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
return;
|
||||
}
|
||||
|
||||
if (this.hasPlugin("flow") && this.state.hasFlowComment) {
|
||||
const end = this.input.indexOf("*-/", (this.state.pos += 2));
|
||||
if (this.state.hasFlowComment) {
|
||||
const end = this.state.input.indexOf("*-/", (this.state.pos += 2));
|
||||
if (end === -1) this.raise(this.state.pos - 2, "Unterminated comment");
|
||||
this.state.pos = end + 3;
|
||||
return;
|
||||
@ -2742,20 +2745,22 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
let shiftToFirstNonWhiteSpace = 2;
|
||||
while (
|
||||
[charCodes.space, charCodes.tab].includes(
|
||||
this.input.charCodeAt(pos + shiftToFirstNonWhiteSpace),
|
||||
this.state.input.charCodeAt(pos + shiftToFirstNonWhiteSpace),
|
||||
)
|
||||
) {
|
||||
shiftToFirstNonWhiteSpace++;
|
||||
}
|
||||
|
||||
const ch2 = this.input.charCodeAt(shiftToFirstNonWhiteSpace + pos);
|
||||
const ch3 = this.input.charCodeAt(shiftToFirstNonWhiteSpace + pos + 1);
|
||||
const ch2 = this.state.input.charCodeAt(shiftToFirstNonWhiteSpace + pos);
|
||||
const ch3 = this.state.input.charCodeAt(
|
||||
shiftToFirstNonWhiteSpace + pos + 1,
|
||||
);
|
||||
|
||||
if (ch2 === charCodes.colon && ch3 === charCodes.colon) {
|
||||
return shiftToFirstNonWhiteSpace + 2; // check for /*::
|
||||
}
|
||||
if (
|
||||
this.input.slice(
|
||||
this.state.input.slice(
|
||||
shiftToFirstNonWhiteSpace + pos,
|
||||
shiftToFirstNonWhiteSpace + pos + 12,
|
||||
) === "flow-include"
|
||||
@ -2769,7 +2774,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
|
||||
hasFlowCommentCompletion(): void {
|
||||
const end = this.input.indexOf("*/", this.state.pos);
|
||||
const end = this.state.input.indexOf("*/", this.state.pos);
|
||||
if (end === -1) {
|
||||
this.raise(this.state.pos, "Unterminated comment");
|
||||
}
|
||||
|
||||
@ -79,11 +79,11 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
let out = "";
|
||||
let chunkStart = this.state.pos;
|
||||
for (;;) {
|
||||
if (this.state.pos >= this.input.length) {
|
||||
if (this.state.pos >= this.state.length) {
|
||||
this.raise(this.state.start, "Unterminated JSX contents");
|
||||
}
|
||||
|
||||
const ch = this.input.charCodeAt(this.state.pos);
|
||||
const ch = this.state.input.charCodeAt(this.state.pos);
|
||||
|
||||
switch (ch) {
|
||||
case charCodes.lessThan:
|
||||
@ -93,20 +93,20 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
++this.state.pos;
|
||||
return this.finishToken(tt.jsxTagStart);
|
||||
}
|
||||
return this.getTokenFromCode(ch);
|
||||
return super.getTokenFromCode(ch);
|
||||
}
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
return this.finishToken(tt.jsxText, out);
|
||||
|
||||
case charCodes.ampersand:
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
out += this.jsxReadEntity();
|
||||
chunkStart = this.state.pos;
|
||||
break;
|
||||
|
||||
default:
|
||||
if (isNewLine(ch)) {
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
out += this.jsxReadNewLine(true);
|
||||
chunkStart = this.state.pos;
|
||||
} else {
|
||||
@ -117,12 +117,12 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
|
||||
jsxReadNewLine(normalizeCRLF: boolean): string {
|
||||
const ch = this.input.charCodeAt(this.state.pos);
|
||||
const ch = this.state.input.charCodeAt(this.state.pos);
|
||||
let out;
|
||||
++this.state.pos;
|
||||
if (
|
||||
ch === charCodes.carriageReturn &&
|
||||
this.input.charCodeAt(this.state.pos) === charCodes.lineFeed
|
||||
this.state.input.charCodeAt(this.state.pos) === charCodes.lineFeed
|
||||
) {
|
||||
++this.state.pos;
|
||||
out = normalizeCRLF ? "\n" : "\r\n";
|
||||
@ -139,25 +139,25 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
let out = "";
|
||||
let chunkStart = ++this.state.pos;
|
||||
for (;;) {
|
||||
if (this.state.pos >= this.input.length) {
|
||||
if (this.state.pos >= this.state.length) {
|
||||
this.raise(this.state.start, "Unterminated string constant");
|
||||
}
|
||||
|
||||
const ch = this.input.charCodeAt(this.state.pos);
|
||||
const ch = this.state.input.charCodeAt(this.state.pos);
|
||||
if (ch === quote) break;
|
||||
if (ch === charCodes.ampersand) {
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
out += this.jsxReadEntity();
|
||||
chunkStart = this.state.pos;
|
||||
} else if (isNewLine(ch)) {
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
out += this.jsxReadNewLine(false);
|
||||
chunkStart = this.state.pos;
|
||||
} else {
|
||||
++this.state.pos;
|
||||
}
|
||||
}
|
||||
out += this.input.slice(chunkStart, this.state.pos++);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos++);
|
||||
return this.finishToken(tt.string, out);
|
||||
}
|
||||
|
||||
@ -165,11 +165,11 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
let str = "";
|
||||
let count = 0;
|
||||
let entity;
|
||||
let ch = this.input[this.state.pos];
|
||||
let ch = this.state.input[this.state.pos];
|
||||
|
||||
const startPos = ++this.state.pos;
|
||||
while (this.state.pos < this.input.length && count++ < 10) {
|
||||
ch = this.input[this.state.pos++];
|
||||
while (this.state.pos < this.state.length && count++ < 10) {
|
||||
ch = this.state.input[this.state.pos++];
|
||||
if (ch === ";") {
|
||||
if (str[0] === "#") {
|
||||
if (str[1] === "x") {
|
||||
@ -208,11 +208,11 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
let ch;
|
||||
const start = this.state.pos;
|
||||
do {
|
||||
ch = this.input.charCodeAt(++this.state.pos);
|
||||
ch = this.state.input.charCodeAt(++this.state.pos);
|
||||
} while (isIdentifierChar(ch) || ch === charCodes.dash);
|
||||
return this.finishToken(
|
||||
tt.jsxName,
|
||||
this.input.slice(start, this.state.pos),
|
||||
this.state.input.slice(start, this.state.pos),
|
||||
);
|
||||
}
|
||||
|
||||
@ -520,8 +520,8 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
}
|
||||
|
||||
readToken(code: number): void {
|
||||
if (this.state.inPropertyName) return super.readToken(code);
|
||||
getTokenFromCode(code: number): void {
|
||||
if (this.state.inPropertyName) return super.getTokenFromCode(code);
|
||||
|
||||
const context = this.curContext();
|
||||
|
||||
@ -557,7 +557,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
return this.finishToken(tt.jsxTagStart);
|
||||
}
|
||||
|
||||
return super.readToken(code);
|
||||
return super.getTokenFromCode(code);
|
||||
}
|
||||
|
||||
updateContext(prevType: TokenType): void {
|
||||
|
||||
@ -1324,7 +1324,7 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
|
||||
tsCheckLineTerminatorAndMatch(tokenType: TokenType, next: boolean) {
|
||||
return !this.isLineTerminator() && (next || this.match(tokenType));
|
||||
return (next || this.match(tokenType)) && !this.isLineTerminator();
|
||||
}
|
||||
|
||||
tsTryParseGenericAsyncArrowFunction(
|
||||
@ -2218,11 +2218,11 @@ export default (superClass: Class<Parser>): Class<Parser> =>
|
||||
}
|
||||
|
||||
// ensure that inside types, we bypass the jsx parser plugin
|
||||
readToken(code: number): void {
|
||||
getTokenFromCode(code: number): void {
|
||||
if (this.state.inType && (code === 62 || code === 60)) {
|
||||
return this.finishOp(tt.relational, 1);
|
||||
} else {
|
||||
return super.readToken(code);
|
||||
return super.getTokenFromCode(code);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -107,7 +107,9 @@ tt._function.updateContext = tt._class.updateContext = function(prevType) {
|
||||
prevType !== tt._else &&
|
||||
!(
|
||||
prevType === tt._return &&
|
||||
lineBreak.test(this.input.slice(this.state.lastTokEnd, this.state.start))
|
||||
lineBreak.test(
|
||||
this.state.input.slice(this.state.lastTokEnd, this.state.start),
|
||||
)
|
||||
) &&
|
||||
!(
|
||||
(prevType === tt.colon || prevType === tt.braceL) &&
|
||||
|
||||
@ -3,11 +3,7 @@
|
||||
import type { Options } from "../options";
|
||||
import type { Position } from "../util/location";
|
||||
import * as charCodes from "charcodes";
|
||||
import {
|
||||
isIdentifierStart,
|
||||
isIdentifierChar,
|
||||
isKeyword,
|
||||
} from "../util/identifier";
|
||||
import { isIdentifierStart, isIdentifierChar } from "../util/identifier";
|
||||
import { types as tt, keywords as keywordTypes, type TokenType } from "./types";
|
||||
import { type TokContext, types as ct } from "./context";
|
||||
import LocationParser from "../parser/location";
|
||||
@ -20,7 +16,7 @@ import {
|
||||
} from "../util/whitespace";
|
||||
import State from "./state";
|
||||
|
||||
const VALID_REGEX_FLAGS = "gmsiyu";
|
||||
const VALID_REGEX_FLAGS = new Set(["g", "m", "s", "i", "y", "u"]);
|
||||
|
||||
// The following character codes are forbidden from being
|
||||
// an immediate sibling of NumericLiteralSeparator _
|
||||
@ -157,12 +153,6 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
// TODO
|
||||
|
||||
isKeyword(word: string): boolean {
|
||||
return isKeyword(word);
|
||||
}
|
||||
|
||||
// TODO
|
||||
|
||||
lookahead(): State {
|
||||
const old = this.state;
|
||||
this.state = old.clone(true);
|
||||
@ -185,7 +175,7 @@ export default class Tokenizer extends LocationParser {
|
||||
this.state.pos = this.state.start;
|
||||
while (this.state.pos < this.state.lineStart) {
|
||||
this.state.lineStart =
|
||||
this.input.lastIndexOf("\n", this.state.lineStart - 2) + 1;
|
||||
this.state.input.lastIndexOf("\n", this.state.lineStart - 2) + 1;
|
||||
--this.state.curLine;
|
||||
}
|
||||
this.nextToken();
|
||||
@ -206,7 +196,7 @@ export default class Tokenizer extends LocationParser {
|
||||
this.state.octalPosition = null;
|
||||
this.state.start = this.state.pos;
|
||||
this.state.startLoc = this.state.curPosition();
|
||||
if (this.state.pos >= this.input.length) {
|
||||
if (this.state.pos >= this.state.length) {
|
||||
this.finishToken(tt.eof);
|
||||
return;
|
||||
}
|
||||
@ -214,17 +204,7 @@ export default class Tokenizer extends LocationParser {
|
||||
if (curContext.override) {
|
||||
curContext.override(this);
|
||||
} else {
|
||||
this.readToken(this.input.codePointAt(this.state.pos));
|
||||
}
|
||||
}
|
||||
|
||||
readToken(code: number): void {
|
||||
// Identifier or keyword. '\uXXXX' sequences are allowed in
|
||||
// identifiers, so '\' also dispatches to that.
|
||||
if (isIdentifierStart(code) || code === charCodes.backslash) {
|
||||
this.readWord();
|
||||
} else {
|
||||
this.getTokenFromCode(code);
|
||||
this.getTokenFromCode(this.state.input.codePointAt(this.state.pos));
|
||||
}
|
||||
}
|
||||
|
||||
@ -254,14 +234,14 @@ export default class Tokenizer extends LocationParser {
|
||||
skipBlockComment(): void {
|
||||
const startLoc = this.state.curPosition();
|
||||
const start = this.state.pos;
|
||||
const end = this.input.indexOf("*/", (this.state.pos += 2));
|
||||
const end = this.state.input.indexOf("*/", (this.state.pos += 2));
|
||||
if (end === -1) this.raise(this.state.pos - 2, "Unterminated comment");
|
||||
|
||||
this.state.pos = end + 2;
|
||||
lineBreakG.lastIndex = start;
|
||||
let match;
|
||||
while (
|
||||
(match = lineBreakG.exec(this.input)) &&
|
||||
(match = lineBreakG.exec(this.state.input)) &&
|
||||
match.index < this.state.pos
|
||||
) {
|
||||
++this.state.curLine;
|
||||
@ -270,7 +250,7 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
this.pushComment(
|
||||
true,
|
||||
this.input.slice(start + 2, end),
|
||||
this.state.input.slice(start + 2, end),
|
||||
start,
|
||||
this.state.pos,
|
||||
startLoc,
|
||||
@ -281,22 +261,22 @@ export default class Tokenizer extends LocationParser {
|
||||
skipLineComment(startSkip: number): void {
|
||||
const start = this.state.pos;
|
||||
const startLoc = this.state.curPosition();
|
||||
let ch = this.input.charCodeAt((this.state.pos += startSkip));
|
||||
if (this.state.pos < this.input.length) {
|
||||
let ch = this.state.input.charCodeAt((this.state.pos += startSkip));
|
||||
if (this.state.pos < this.state.length) {
|
||||
while (
|
||||
ch !== charCodes.lineFeed &&
|
||||
ch !== charCodes.carriageReturn &&
|
||||
ch !== charCodes.lineSeparator &&
|
||||
ch !== charCodes.paragraphSeparator &&
|
||||
++this.state.pos < this.input.length
|
||||
++this.state.pos < this.state.length
|
||||
) {
|
||||
ch = this.input.charCodeAt(this.state.pos);
|
||||
ch = this.state.input.charCodeAt(this.state.pos);
|
||||
}
|
||||
}
|
||||
|
||||
this.pushComment(
|
||||
false,
|
||||
this.input.slice(start + startSkip, this.state.pos),
|
||||
this.state.input.slice(start + startSkip, this.state.pos),
|
||||
start,
|
||||
this.state.pos,
|
||||
startLoc,
|
||||
@ -308,12 +288,18 @@ export default class Tokenizer extends LocationParser {
|
||||
// whitespace and comments, and.
|
||||
|
||||
skipSpace(): void {
|
||||
loop: while (this.state.pos < this.input.length) {
|
||||
const ch = this.input.charCodeAt(this.state.pos);
|
||||
loop: while (this.state.pos < this.state.length) {
|
||||
const ch = this.state.input.charCodeAt(this.state.pos);
|
||||
switch (ch) {
|
||||
case charCodes.space:
|
||||
case charCodes.nonBreakingSpace:
|
||||
case charCodes.tab:
|
||||
++this.state.pos;
|
||||
break;
|
||||
case charCodes.carriageReturn:
|
||||
if (
|
||||
this.input.charCodeAt(this.state.pos + 1) === charCodes.lineFeed
|
||||
this.state.input.charCodeAt(this.state.pos + 1) ===
|
||||
charCodes.lineFeed
|
||||
) {
|
||||
++this.state.pos;
|
||||
}
|
||||
@ -327,7 +313,7 @@ export default class Tokenizer extends LocationParser {
|
||||
break;
|
||||
|
||||
case charCodes.slash:
|
||||
switch (this.input.charCodeAt(this.state.pos + 1)) {
|
||||
switch (this.state.input.charCodeAt(this.state.pos + 1)) {
|
||||
case charCodes.asterisk:
|
||||
this.skipBlockComment();
|
||||
break;
|
||||
@ -382,7 +368,7 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
|
||||
const nextPos = this.state.pos + 1;
|
||||
const next = this.input.charCodeAt(nextPos);
|
||||
const next = this.state.input.charCodeAt(nextPos);
|
||||
if (next >= charCodes.digit0 && next <= charCodes.digit9) {
|
||||
this.raise(this.state.pos, "Unexpected digit after hash token");
|
||||
}
|
||||
@ -405,13 +391,13 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
|
||||
readToken_dot(): void {
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (next >= charCodes.digit0 && next <= charCodes.digit9) {
|
||||
this.readNumber(true);
|
||||
return;
|
||||
}
|
||||
|
||||
const next2 = this.input.charCodeAt(this.state.pos + 2);
|
||||
const next2 = this.state.input.charCodeAt(this.state.pos + 2);
|
||||
if (next === charCodes.dot && next2 === charCodes.dot) {
|
||||
this.state.pos += 3;
|
||||
this.finishToken(tt.ellipsis);
|
||||
@ -429,7 +415,7 @@ export default class Tokenizer extends LocationParser {
|
||||
return;
|
||||
}
|
||||
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (next === charCodes.equalsTo) {
|
||||
this.finishOp(tt.assign, 2);
|
||||
} else {
|
||||
@ -438,12 +424,12 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
|
||||
readToken_interpreter(): boolean {
|
||||
if (this.state.pos !== 0 || this.state.input.length < 2) return false;
|
||||
if (this.state.pos !== 0 || this.state.length < 2) return false;
|
||||
|
||||
const start = this.state.pos;
|
||||
this.state.pos += 1;
|
||||
|
||||
let ch = this.input.charCodeAt(this.state.pos);
|
||||
let ch = this.state.input.charCodeAt(this.state.pos);
|
||||
if (ch !== charCodes.exclamationMark) return false;
|
||||
|
||||
while (
|
||||
@ -451,12 +437,12 @@ export default class Tokenizer extends LocationParser {
|
||||
ch !== charCodes.carriageReturn &&
|
||||
ch !== charCodes.lineSeparator &&
|
||||
ch !== charCodes.paragraphSeparator &&
|
||||
++this.state.pos < this.input.length
|
||||
++this.state.pos < this.state.length
|
||||
) {
|
||||
ch = this.input.charCodeAt(this.state.pos);
|
||||
ch = this.state.input.charCodeAt(this.state.pos);
|
||||
}
|
||||
|
||||
const value = this.input.slice(start + 2, this.state.pos);
|
||||
const value = this.state.input.slice(start + 2, this.state.pos);
|
||||
|
||||
this.finishToken(tt.interpreterDirective, value);
|
||||
|
||||
@ -467,13 +453,13 @@ export default class Tokenizer extends LocationParser {
|
||||
// '%*'
|
||||
let type = code === charCodes.asterisk ? tt.star : tt.modulo;
|
||||
let width = 1;
|
||||
let next = this.input.charCodeAt(this.state.pos + 1);
|
||||
let next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
const exprAllowed = this.state.exprAllowed;
|
||||
|
||||
// Exponentiation operator **
|
||||
if (code === charCodes.asterisk && next === charCodes.asterisk) {
|
||||
width++;
|
||||
next = this.input.charCodeAt(this.state.pos + 2);
|
||||
next = this.state.input.charCodeAt(this.state.pos + 2);
|
||||
type = tt.exponent;
|
||||
}
|
||||
|
||||
@ -486,11 +472,13 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
|
||||
readToken_pipe_amp(code: number): void {
|
||||
// '|&'
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
// '||' '&&' '||=' '&&='
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
|
||||
if (next === code) {
|
||||
if (this.input.charCodeAt(this.state.pos + 2) === charCodes.equalsTo) {
|
||||
if (
|
||||
this.state.input.charCodeAt(this.state.pos + 2) === charCodes.equalsTo
|
||||
) {
|
||||
this.finishOp(tt.assign, 3);
|
||||
} else {
|
||||
this.finishOp(
|
||||
@ -506,10 +494,6 @@ export default class Tokenizer extends LocationParser {
|
||||
if (next === charCodes.greaterThan) {
|
||||
this.finishOp(tt.pipeline, 2);
|
||||
return;
|
||||
} else if (next === charCodes.rightCurlyBrace && this.hasPlugin("flow")) {
|
||||
// '|}'
|
||||
this.finishOp(tt.braceBarR, 2);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@ -526,7 +510,7 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
readToken_caret(): void {
|
||||
// '^'
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (next === charCodes.equalsTo) {
|
||||
this.finishOp(tt.assign, 2);
|
||||
} else {
|
||||
@ -536,14 +520,17 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
readToken_plus_min(code: number): void {
|
||||
// '+-'
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
|
||||
if (next === code) {
|
||||
if (
|
||||
next === charCodes.dash &&
|
||||
!this.inModule &&
|
||||
this.input.charCodeAt(this.state.pos + 2) === charCodes.greaterThan &&
|
||||
lineBreak.test(this.input.slice(this.state.lastTokEnd, this.state.pos))
|
||||
this.state.input.charCodeAt(this.state.pos + 2) ===
|
||||
charCodes.greaterThan &&
|
||||
lineBreak.test(
|
||||
this.state.input.slice(this.state.lastTokEnd, this.state.pos),
|
||||
)
|
||||
) {
|
||||
// A `-->` line comment
|
||||
this.skipLineComment(3);
|
||||
@ -564,16 +551,20 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
readToken_lt_gt(code: number): void {
|
||||
// '<>'
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
let size = 1;
|
||||
|
||||
if (next === code) {
|
||||
size =
|
||||
code === charCodes.greaterThan &&
|
||||
this.input.charCodeAt(this.state.pos + 2) === charCodes.greaterThan
|
||||
this.state.input.charCodeAt(this.state.pos + 2) ===
|
||||
charCodes.greaterThan
|
||||
? 3
|
||||
: 2;
|
||||
if (this.input.charCodeAt(this.state.pos + size) === charCodes.equalsTo) {
|
||||
if (
|
||||
this.state.input.charCodeAt(this.state.pos + size) ===
|
||||
charCodes.equalsTo
|
||||
) {
|
||||
this.finishOp(tt.assign, size + 1);
|
||||
return;
|
||||
}
|
||||
@ -585,8 +576,8 @@ export default class Tokenizer extends LocationParser {
|
||||
next === charCodes.exclamationMark &&
|
||||
code === charCodes.lessThan &&
|
||||
!this.inModule &&
|
||||
this.input.charCodeAt(this.state.pos + 2) === charCodes.dash &&
|
||||
this.input.charCodeAt(this.state.pos + 3) === charCodes.dash
|
||||
this.state.input.charCodeAt(this.state.pos + 2) === charCodes.dash &&
|
||||
this.state.input.charCodeAt(this.state.pos + 3) === charCodes.dash
|
||||
) {
|
||||
// `<!--`, an XML-style comment that should be interpreted as a line comment
|
||||
this.skipLineComment(4);
|
||||
@ -605,11 +596,11 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
readToken_eq_excl(code: number): void {
|
||||
// '=!'
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (next === charCodes.equalsTo) {
|
||||
this.finishOp(
|
||||
tt.equality,
|
||||
this.input.charCodeAt(this.state.pos + 2) === charCodes.equalsTo
|
||||
this.state.input.charCodeAt(this.state.pos + 2) === charCodes.equalsTo
|
||||
? 3
|
||||
: 2,
|
||||
);
|
||||
@ -626,8 +617,8 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
readToken_question(): void {
|
||||
// '?'
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next2 = this.input.charCodeAt(this.state.pos + 2);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
const next2 = this.state.input.charCodeAt(this.state.pos + 2);
|
||||
if (next === charCodes.questionMark && !this.state.inType) {
|
||||
if (next2 === charCodes.equalsTo) {
|
||||
// '??='
|
||||
@ -651,10 +642,6 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
getTokenFromCode(code: number): void {
|
||||
switch (code) {
|
||||
case charCodes.numberSign:
|
||||
this.readToken_numberSign();
|
||||
return;
|
||||
|
||||
// The interpretation of a dot depends on whether it is followed
|
||||
// by a digit or another two dots.
|
||||
|
||||
@ -687,19 +674,10 @@ export default class Tokenizer extends LocationParser {
|
||||
++this.state.pos;
|
||||
this.finishToken(tt.bracketR);
|
||||
return;
|
||||
|
||||
case charCodes.leftCurlyBrace:
|
||||
if (
|
||||
this.hasPlugin("flow") &&
|
||||
this.input.charCodeAt(this.state.pos + 1) === charCodes.verticalBar
|
||||
) {
|
||||
this.finishOp(tt.braceBarL, 2);
|
||||
} else {
|
||||
++this.state.pos;
|
||||
this.finishToken(tt.braceL);
|
||||
}
|
||||
return;
|
||||
|
||||
case charCodes.rightCurlyBrace:
|
||||
++this.state.pos;
|
||||
this.finishToken(tt.braceR);
|
||||
@ -708,7 +686,7 @@ export default class Tokenizer extends LocationParser {
|
||||
case charCodes.colon:
|
||||
if (
|
||||
this.hasPlugin("functionBind") &&
|
||||
this.input.charCodeAt(this.state.pos + 1) === charCodes.colon
|
||||
this.state.input.charCodeAt(this.state.pos + 1) === charCodes.colon
|
||||
) {
|
||||
this.finishOp(tt.doubleColon, 2);
|
||||
} else {
|
||||
@ -720,10 +698,6 @@ export default class Tokenizer extends LocationParser {
|
||||
case charCodes.questionMark:
|
||||
this.readToken_question();
|
||||
return;
|
||||
case charCodes.atSign:
|
||||
++this.state.pos;
|
||||
this.finishToken(tt.at);
|
||||
return;
|
||||
|
||||
case charCodes.graveAccent:
|
||||
++this.state.pos;
|
||||
@ -731,7 +705,7 @@ export default class Tokenizer extends LocationParser {
|
||||
return;
|
||||
|
||||
case charCodes.digit0: {
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
// '0x', '0X' - hex number
|
||||
if (next === charCodes.lowercaseX || next === charCodes.uppercaseX) {
|
||||
this.readRadixNumber(16);
|
||||
@ -809,6 +783,25 @@ export default class Tokenizer extends LocationParser {
|
||||
case charCodes.tilde:
|
||||
this.finishOp(tt.tilde, 1);
|
||||
return;
|
||||
|
||||
case charCodes.atSign:
|
||||
++this.state.pos;
|
||||
this.finishToken(tt.at);
|
||||
return;
|
||||
|
||||
case charCodes.numberSign:
|
||||
this.readToken_numberSign();
|
||||
return;
|
||||
|
||||
case charCodes.backslash:
|
||||
this.readWord();
|
||||
return;
|
||||
|
||||
default:
|
||||
if (isIdentifierStart(code)) {
|
||||
this.readWord();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
this.raise(
|
||||
@ -818,7 +811,7 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
|
||||
finishOp(type: TokenType, size: number): void {
|
||||
const str = this.input.slice(this.state.pos, this.state.pos + size);
|
||||
const str = this.state.input.slice(this.state.pos, this.state.pos + size);
|
||||
this.state.pos += size;
|
||||
this.finishToken(type, str);
|
||||
}
|
||||
@ -827,10 +820,10 @@ export default class Tokenizer extends LocationParser {
|
||||
const start = this.state.pos;
|
||||
let escaped, inClass;
|
||||
for (;;) {
|
||||
if (this.state.pos >= this.input.length) {
|
||||
if (this.state.pos >= this.state.length) {
|
||||
this.raise(start, "Unterminated regular expression");
|
||||
}
|
||||
const ch = this.input.charAt(this.state.pos);
|
||||
const ch = this.state.input.charAt(this.state.pos);
|
||||
if (lineBreak.test(ch)) {
|
||||
this.raise(start, "Unterminated regular expression");
|
||||
}
|
||||
@ -848,16 +841,16 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
++this.state.pos;
|
||||
}
|
||||
const content = this.input.slice(start, this.state.pos);
|
||||
const content = this.state.input.slice(start, this.state.pos);
|
||||
++this.state.pos;
|
||||
|
||||
let mods = "";
|
||||
|
||||
while (this.state.pos < this.input.length) {
|
||||
const char = this.input[this.state.pos];
|
||||
const charCode = this.input.codePointAt(this.state.pos);
|
||||
while (this.state.pos < this.state.length) {
|
||||
const char = this.state.input[this.state.pos];
|
||||
const charCode = this.state.input.codePointAt(this.state.pos);
|
||||
|
||||
if (VALID_REGEX_FLAGS.indexOf(char) > -1) {
|
||||
if (VALID_REGEX_FLAGS.has(char)) {
|
||||
if (mods.indexOf(char) > -1) {
|
||||
this.raise(this.state.pos + 1, "Duplicate regular expression flag");
|
||||
}
|
||||
@ -902,12 +895,12 @@ export default class Tokenizer extends LocationParser {
|
||||
let total = 0;
|
||||
|
||||
for (let i = 0, e = len == null ? Infinity : len; i < e; ++i) {
|
||||
const code = this.input.charCodeAt(this.state.pos);
|
||||
const code = this.state.input.charCodeAt(this.state.pos);
|
||||
let val;
|
||||
|
||||
if (this.hasPlugin("numericSeparator")) {
|
||||
const prev = this.input.charCodeAt(this.state.pos - 1);
|
||||
const next = this.input.charCodeAt(this.state.pos + 1);
|
||||
const prev = this.state.input.charCodeAt(this.state.pos - 1);
|
||||
const next = this.state.input.charCodeAt(this.state.pos + 1);
|
||||
if (code === charCodes.underscore) {
|
||||
if (allowedSiblings.indexOf(next) === -1) {
|
||||
this.raise(this.state.pos, "Invalid or unexpected token");
|
||||
@ -961,18 +954,22 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
|
||||
if (this.hasPlugin("bigInt")) {
|
||||
if (this.input.charCodeAt(this.state.pos) === charCodes.lowercaseN) {
|
||||
if (
|
||||
this.state.input.charCodeAt(this.state.pos) === charCodes.lowercaseN
|
||||
) {
|
||||
++this.state.pos;
|
||||
isBigInt = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (isIdentifierStart(this.input.codePointAt(this.state.pos))) {
|
||||
if (isIdentifierStart(this.state.input.codePointAt(this.state.pos))) {
|
||||
this.raise(this.state.pos, "Identifier directly after number");
|
||||
}
|
||||
|
||||
if (isBigInt) {
|
||||
const str = this.input.slice(start, this.state.pos).replace(/[_n]/g, "");
|
||||
const str = this.state.input
|
||||
.slice(start, this.state.pos)
|
||||
.replace(/[_n]/g, "");
|
||||
this.finishToken(tt.bigint, str);
|
||||
return;
|
||||
}
|
||||
@ -992,7 +989,7 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
let octal =
|
||||
this.state.pos - start >= 2 &&
|
||||
this.input.charCodeAt(start) === charCodes.digit0;
|
||||
this.state.input.charCodeAt(start) === charCodes.digit0;
|
||||
if (octal) {
|
||||
if (this.state.strict) {
|
||||
this.raise(
|
||||
@ -1000,30 +997,30 @@ export default class Tokenizer extends LocationParser {
|
||||
"Legacy octal literals are not allowed in strict mode",
|
||||
);
|
||||
}
|
||||
if (/[89]/.test(this.input.slice(start, this.state.pos))) {
|
||||
if (/[89]/.test(this.state.input.slice(start, this.state.pos))) {
|
||||
octal = false;
|
||||
}
|
||||
}
|
||||
|
||||
let next = this.input.charCodeAt(this.state.pos);
|
||||
let next = this.state.input.charCodeAt(this.state.pos);
|
||||
if (next === charCodes.dot && !octal) {
|
||||
++this.state.pos;
|
||||
this.readInt(10);
|
||||
isFloat = true;
|
||||
next = this.input.charCodeAt(this.state.pos);
|
||||
next = this.state.input.charCodeAt(this.state.pos);
|
||||
}
|
||||
|
||||
if (
|
||||
(next === charCodes.uppercaseE || next === charCodes.lowercaseE) &&
|
||||
!octal
|
||||
) {
|
||||
next = this.input.charCodeAt(++this.state.pos);
|
||||
next = this.state.input.charCodeAt(++this.state.pos);
|
||||
if (next === charCodes.plusSign || next === charCodes.dash) {
|
||||
++this.state.pos;
|
||||
}
|
||||
if (this.readInt(10) === null) this.raise(start, "Invalid number");
|
||||
isFloat = true;
|
||||
next = this.input.charCodeAt(this.state.pos);
|
||||
next = this.state.input.charCodeAt(this.state.pos);
|
||||
}
|
||||
|
||||
if (this.hasPlugin("bigInt")) {
|
||||
@ -1035,12 +1032,14 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
}
|
||||
|
||||
if (isIdentifierStart(this.input.codePointAt(this.state.pos))) {
|
||||
if (isIdentifierStart(this.state.input.codePointAt(this.state.pos))) {
|
||||
this.raise(this.state.pos, "Identifier directly after number");
|
||||
}
|
||||
|
||||
// remove "_" for numeric literal separator, and "n" for BigInts
|
||||
const str = this.input.slice(start, this.state.pos).replace(/[_n]/g, "");
|
||||
const str = this.state.input
|
||||
.slice(start, this.state.pos)
|
||||
.replace(/[_n]/g, "");
|
||||
|
||||
if (isBigInt) {
|
||||
this.finishToken(tt.bigint, str);
|
||||
@ -1054,13 +1053,13 @@ export default class Tokenizer extends LocationParser {
|
||||
// Read a string value, interpreting backslash-escapes.
|
||||
|
||||
readCodePoint(throwOnInvalid: boolean): number | null {
|
||||
const ch = this.input.charCodeAt(this.state.pos);
|
||||
const ch = this.state.input.charCodeAt(this.state.pos);
|
||||
let code;
|
||||
|
||||
if (ch === charCodes.leftCurlyBrace) {
|
||||
const codePos = ++this.state.pos;
|
||||
code = this.readHexChar(
|
||||
this.input.indexOf("}", this.state.pos) - this.state.pos,
|
||||
this.state.input.indexOf("}", this.state.pos) - this.state.pos,
|
||||
throwOnInvalid,
|
||||
);
|
||||
++this.state.pos;
|
||||
@ -1085,13 +1084,13 @@ export default class Tokenizer extends LocationParser {
|
||||
let out = "",
|
||||
chunkStart = ++this.state.pos;
|
||||
for (;;) {
|
||||
if (this.state.pos >= this.input.length) {
|
||||
if (this.state.pos >= this.state.length) {
|
||||
this.raise(this.state.start, "Unterminated string constant");
|
||||
}
|
||||
const ch = this.input.charCodeAt(this.state.pos);
|
||||
const ch = this.state.input.charCodeAt(this.state.pos);
|
||||
if (ch === quote) break;
|
||||
if (ch === charCodes.backslash) {
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
// $FlowFixMe
|
||||
out += this.readEscapedChar(false);
|
||||
chunkStart = this.state.pos;
|
||||
@ -1107,7 +1106,7 @@ export default class Tokenizer extends LocationParser {
|
||||
++this.state.pos;
|
||||
}
|
||||
}
|
||||
out += this.input.slice(chunkStart, this.state.pos++);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos++);
|
||||
this.finishToken(tt.string, out);
|
||||
}
|
||||
|
||||
@ -1118,14 +1117,14 @@ export default class Tokenizer extends LocationParser {
|
||||
chunkStart = this.state.pos,
|
||||
containsInvalid = false;
|
||||
for (;;) {
|
||||
if (this.state.pos >= this.input.length) {
|
||||
if (this.state.pos >= this.state.length) {
|
||||
this.raise(this.state.start, "Unterminated template");
|
||||
}
|
||||
const ch = this.input.charCodeAt(this.state.pos);
|
||||
const ch = this.state.input.charCodeAt(this.state.pos);
|
||||
if (
|
||||
ch === charCodes.graveAccent ||
|
||||
(ch === charCodes.dollarSign &&
|
||||
this.input.charCodeAt(this.state.pos + 1) ===
|
||||
this.state.input.charCodeAt(this.state.pos + 1) ===
|
||||
charCodes.leftCurlyBrace)
|
||||
) {
|
||||
if (this.state.pos === this.state.start && this.match(tt.template)) {
|
||||
@ -1139,12 +1138,12 @@ export default class Tokenizer extends LocationParser {
|
||||
return;
|
||||
}
|
||||
}
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
this.finishToken(tt.template, containsInvalid ? null : out);
|
||||
return;
|
||||
}
|
||||
if (ch === charCodes.backslash) {
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
const escaped = this.readEscapedChar(true);
|
||||
if (escaped === null) {
|
||||
containsInvalid = true;
|
||||
@ -1153,11 +1152,13 @@ export default class Tokenizer extends LocationParser {
|
||||
}
|
||||
chunkStart = this.state.pos;
|
||||
} else if (isNewLine(ch)) {
|
||||
out += this.input.slice(chunkStart, this.state.pos);
|
||||
out += this.state.input.slice(chunkStart, this.state.pos);
|
||||
++this.state.pos;
|
||||
switch (ch) {
|
||||
case charCodes.carriageReturn:
|
||||
if (this.input.charCodeAt(this.state.pos) === charCodes.lineFeed) {
|
||||
if (
|
||||
this.state.input.charCodeAt(this.state.pos) === charCodes.lineFeed
|
||||
) {
|
||||
++this.state.pos;
|
||||
}
|
||||
case charCodes.lineFeed:
|
||||
@ -1180,7 +1181,7 @@ export default class Tokenizer extends LocationParser {
|
||||
|
||||
readEscapedChar(inTemplate: boolean): string | null {
|
||||
const throwOnInvalid = !inTemplate;
|
||||
const ch = this.input.charCodeAt(++this.state.pos);
|
||||
const ch = this.state.input.charCodeAt(++this.state.pos);
|
||||
++this.state.pos;
|
||||
switch (ch) {
|
||||
case charCodes.lowercaseN:
|
||||
@ -1204,7 +1205,9 @@ export default class Tokenizer extends LocationParser {
|
||||
case charCodes.lowercaseF:
|
||||
return "\f";
|
||||
case charCodes.carriageReturn:
|
||||
if (this.input.charCodeAt(this.state.pos) === charCodes.lineFeed) {
|
||||
if (
|
||||
this.state.input.charCodeAt(this.state.pos) === charCodes.lineFeed
|
||||
) {
|
||||
++this.state.pos;
|
||||
}
|
||||
case charCodes.lineFeed:
|
||||
@ -1215,7 +1218,7 @@ export default class Tokenizer extends LocationParser {
|
||||
if (ch >= charCodes.digit0 && ch <= charCodes.digit7) {
|
||||
const codePos = this.state.pos - 1;
|
||||
// $FlowFixMe
|
||||
let octalStr = this.input
|
||||
let octalStr = this.state.input
|
||||
.substr(this.state.pos - 1, 3)
|
||||
.match(/^[0-7]+/)[0];
|
||||
let octal = parseInt(octalStr, 8);
|
||||
@ -1266,23 +1269,28 @@ export default class Tokenizer extends LocationParser {
// as a micro-optimization.

readWord1(): string {
let word = "";
this.state.containsEsc = false;
let word = "",
first = true,
chunkStart = this.state.pos;
while (this.state.pos < this.input.length) {
const ch = this.input.codePointAt(this.state.pos);
const start = this.state.pos;
let chunkStart = this.state.pos;

while (this.state.pos < this.state.length) {
const ch = this.state.input.codePointAt(this.state.pos);
if (isIdentifierChar(ch)) {
this.state.pos += ch <= 0xffff ? 1 : 2;
} else if (this.state.isIterator && ch === charCodes.atSign) {
this.state.pos += 1;
++this.state.pos;
} else if (ch === charCodes.backslash) {
this.state.containsEsc = true;

word += this.input.slice(chunkStart, this.state.pos);
word += this.state.input.slice(chunkStart, this.state.pos);
const escStart = this.state.pos;
const identifierCheck =
this.state.pos === start ? isIdentifierStart : isIdentifierChar;

if (this.input.charCodeAt(++this.state.pos) !== charCodes.lowercaseU) {
if (
this.state.input.charCodeAt(++this.state.pos) !== charCodes.lowercaseU
) {
this.raise(
this.state.pos,
"Expecting Unicode escape sequence \\uXXXX",
@ -1291,8 +1299,11 @@ export default class Tokenizer extends LocationParser {

++this.state.pos;
const esc = this.readCodePoint(true);

if (
// $FlowFixMe (thinks esc may be null, but throwOnInvalid is true)
if (!(first ? isIdentifierStart : isIdentifierChar)(esc, true)) {
!identifierCheck(esc, true)
) {
this.raise(escStart, "Invalid Unicode escape");
}

@ -1302,9 +1313,8 @@ export default class Tokenizer extends LocationParser {
} else {
break;
}
first = false;
}
return word + this.input.slice(chunkStart, this.state.pos);
return word + this.state.input.slice(chunkStart, this.state.pos);
}

isIterator(word: string): boolean {
@ -1316,16 +1326,12 @@ export default class Tokenizer extends LocationParser {

readWord(): void {
const word = this.readWord1();
let type = tt.name;
const type = keywordTypes[word] || tt.name;

if (this.isKeyword(word)) {
if (this.state.containsEsc) {
if (type.keyword && this.state.containsEsc) {
this.raise(this.state.pos, `Escape sequence in keyword ${word}`);
}

type = keywordTypes[word];
}

// Allow @@iterator and @@asyncIterator as a identifier only inside type
if (
this.state.isIterator &&
@ -1358,7 +1364,7 @@ export default class Tokenizer extends LocationParser {
(prevType === tt.name && this.state.exprAllowed)
) {
return lineBreak.test(
this.input.slice(this.state.lastTokEnd, this.state.start),
this.state.input.slice(this.state.lastTokEnd, this.state.start),
);
}

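In readWord above, the token type is now resolved with a single table lookup (keywordTypes[word] || tt.name) and keywords are detected via type.keyword, instead of calling this.isKeyword(word) and then indexing keywordTypes a second time. A rough sketch of that lookup-once shape, with placeholder tt and keywordTypes objects:

// Sketch only: placeholder keywordTypes/tt objects, not the parser's real ones.
const tt = { name: { label: "name" } };
const keywordTypes = {
  if: { label: "if", keyword: "if" },
  while: { label: "while", keyword: "while" },
};

function resolveWordToken(word, containsEsc) {
  const type = keywordTypes[word] || tt.name;
  // Keywords written with \u escapes are rejected, as in the hunk above.
  if (type.keyword && containsEsc) {
    throw new SyntaxError(`Escape sequence in keyword ${word}`);
  }
  return type;
}

resolveWordToken("if", false);  // => keywordTypes.if
resolveWordToken("foo", false); // => tt.name
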
@ -24,6 +24,7 @@ type TopicContextState = {
export default class State {
strict: boolean;
input: string;
length: number;

curLine: number;

@ -37,6 +38,7 @@ export default class State {
options.strictMode === false ? false : options.sourceType === "module";

this.input = input;
this.length = input.length;

this.curLine = options.startLine;
this.startLoc = this.endLoc = this.curPosition();
@ -176,7 +178,9 @@ export default class State {

clone(skipArrays?: boolean): State {
const state = new State();
Object.keys(this).forEach(key => {
const keys = Object.keys(this);
for (let i = 0, length = keys.length; i < length; i++) {
const key = keys[i];
// $FlowIgnore
let val = this[key];

@ -186,7 +190,8 @@ export default class State {

// $FlowIgnore
state[key] = val;
});
}

return state;
}
}

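The clone() hunk replaces Object.keys(this).forEach(...) with an indexed for loop, avoiding a callback invocation per property on a hot path; the rest of the method is unchanged. A standalone sketch of the same copying strategy (the skipArrays handling of the real method is omitted):

// Sketch: shallow-clone an object's own enumerable keys with an indexed loop.
function cloneShallow(source) {
  const copy = Object.create(Object.getPrototypeOf(source));
  const keys = Object.keys(source);
  for (let i = 0, length = keys.length; i < length; i++) {
    const key = keys[i];
    copy[key] = source[key];
  }
  return copy;
}

cloneShallow({ pos: 3, curLine: 1 }); // => { pos: 3, curLine: 1 }
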
@ -133,20 +133,20 @@ export const types: { [name: string]: TokenType } = {
incDec: new TokenType("++/--", { prefix, postfix, startsExpr }),
bang: new TokenType("!", { beforeExpr, prefix, startsExpr }),
tilde: new TokenType("~", { beforeExpr, prefix, startsExpr }),
pipeline: new BinopTokenType("|>", 0),
nullishCoalescing: new BinopTokenType("??", 1),
logicalOR: new BinopTokenType("||", 1),
logicalAND: new BinopTokenType("&&", 2),
bitwiseOR: new BinopTokenType("|", 3),
bitwiseXOR: new BinopTokenType("^", 4),
bitwiseAND: new BinopTokenType("&", 5),
equality: new BinopTokenType("==/!=", 6),
relational: new BinopTokenType("</>", 7),
bitShift: new BinopTokenType("<</>>", 8),
pipeline: BinopTokenType("|>", 0),
nullishCoalescing: BinopTokenType("??", 1),
logicalOR: BinopTokenType("||", 1),
logicalAND: BinopTokenType("&&", 2),
bitwiseOR: BinopTokenType("|", 3),
bitwiseXOR: BinopTokenType("^", 4),
bitwiseAND: BinopTokenType("&", 5),
equality: BinopTokenType("==/!=", 6),
relational: BinopTokenType("</>", 7),
bitShift: BinopTokenType("<</>>", 8),
plusMin: new TokenType("+/-", { beforeExpr, binop: 9, prefix, startsExpr }),
modulo: new BinopTokenType("%", 10),
star: new BinopTokenType("*", 10),
slash: new BinopTokenType("/", 10),
modulo: BinopTokenType("%", 10),
star: BinopTokenType("*", 10),
slash: BinopTokenType("/", 10),
exponent: new TokenType("**", {
beforeExpr,
binop: 11,
@ -154,45 +154,53 @@ export const types: { [name: string]: TokenType } = {
}),
};

export const keywords = {
break: new KeywordTokenType("break"),
case: new KeywordTokenType("case", { beforeExpr }),
catch: new KeywordTokenType("catch"),
continue: new KeywordTokenType("continue"),
debugger: new KeywordTokenType("debugger"),
default: new KeywordTokenType("default", { beforeExpr }),
do: new KeywordTokenType("do", { isLoop, beforeExpr }),
else: new KeywordTokenType("else", { beforeExpr }),
finally: new KeywordTokenType("finally"),
for: new KeywordTokenType("for", { isLoop }),
function: new KeywordTokenType("function", { startsExpr }),
if: new KeywordTokenType("if"),
return: new KeywordTokenType("return", { beforeExpr }),
switch: new KeywordTokenType("switch"),
throw: new KeywordTokenType("throw", { beforeExpr, prefix, startsExpr }),
try: new KeywordTokenType("try"),
var: new KeywordTokenType("var"),
let: new KeywordTokenType("let"),
const: new KeywordTokenType("const"),
while: new KeywordTokenType("while", { isLoop }),
with: new KeywordTokenType("with"),
new: new KeywordTokenType("new", { beforeExpr, startsExpr }),
this: new KeywordTokenType("this", { startsExpr }),
super: new KeywordTokenType("super", { startsExpr }),
class: new KeywordTokenType("class", { startsExpr }),
extends: new KeywordTokenType("extends", { beforeExpr }),
export: new KeywordTokenType("export"),
import: new KeywordTokenType("import", { startsExpr }),
yield: new KeywordTokenType("yield", { beforeExpr, startsExpr }),
null: new KeywordTokenType("null", { startsExpr }),
true: new KeywordTokenType("true", { startsExpr }),
false: new KeywordTokenType("false", { startsExpr }),
in: new KeywordTokenType("in", { beforeExpr, binop: 7 }),
instanceof: new KeywordTokenType("instanceof", { beforeExpr, binop: 7 }),
typeof: new KeywordTokenType("typeof", { beforeExpr, prefix, startsExpr }),
void: new KeywordTokenType("void", { beforeExpr, prefix, startsExpr }),
delete: new KeywordTokenType("delete", { beforeExpr, prefix, startsExpr }),
};
function makeKeywordProps(
name: string,
conf: any,
): PropertyDescriptor<TokenType> {
return { value: KeywordTokenType(name, conf), enumerable: true };
}

// $FlowIssue
export const keywords = Object.create(null, {
break: makeKeywordProps("break"),
case: makeKeywordProps("case", { beforeExpr }),
catch: makeKeywordProps("catch"),
continue: makeKeywordProps("continue"),
debugger: makeKeywordProps("debugger"),
default: makeKeywordProps("default", { beforeExpr }),
do: makeKeywordProps("do", { isLoop, beforeExpr }),
else: makeKeywordProps("else", { beforeExpr }),
finally: makeKeywordProps("finally"),
for: makeKeywordProps("for", { isLoop }),
function: makeKeywordProps("function", { startsExpr }),
if: makeKeywordProps("if"),
return: makeKeywordProps("return", { beforeExpr }),
switch: makeKeywordProps("switch"),
throw: makeKeywordProps("throw", { beforeExpr, prefix, startsExpr }),
try: makeKeywordProps("try"),
var: makeKeywordProps("var"),
let: makeKeywordProps("let"),
const: makeKeywordProps("const"),
while: makeKeywordProps("while", { isLoop }),
with: makeKeywordProps("with"),
new: makeKeywordProps("new"),
this: makeKeywordProps("this", { startsExpr }),
super: makeKeywordProps("super", { startsExpr }),
class: makeKeywordProps("class", { startsExpr }),
extends: makeKeywordProps("extends", { beforeExpr }),
export: makeKeywordProps("export"),
import: makeKeywordProps("import", { startsExpr }),
yield: makeKeywordProps("yield", { beforeExpr, startsExpr }),
null: makeKeywordProps("null", { startsExpr }),
true: makeKeywordProps("true", { startsExpr }),
false: makeKeywordProps("false", { startsExpr }),
in: makeKeywordProps("in", { beforeExpr, binop: 7 }),
instanceof: makeKeywordProps("instanceof", { beforeExpr, binop: 7 }),
typeof: makeKeywordProps("typeof", { beforeExpr, prefix, startsExpr }),
void: makeKeywordProps("void", { beforeExpr, prefix, startsExpr }),
delete: makeKeywordProps("delete", { beforeExpr, prefix, startsExpr }),
});

// Map keyword names to token types.
Object.keys(keywords).forEach(name => {

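The keywords table above is now built with Object.create(null, ...) and enumerable value descriptors, so lookups like keywords[word] can never pick up inherited names from Object.prototype. A reduced sketch of the same construction, with a placeholder makeProps helper:

// Sketch: a prototype-less lookup table built from property descriptors.
function makeProps(value) {
  return { value, enumerable: true };
}

const table = Object.create(null, {
  break: makeProps({ label: "break" }),
  case: makeProps({ label: "case" }),
});

table.break;        // => { label: "break" }
table.toString;     // => undefined: there is no Object.prototype behind the table
Object.keys(table); // => ["break", "case"] because the descriptors are enumerable
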
@ -4,28 +4,72 @@

import * as charCodes from "charcodes";

function makePredicate(words: string): (str: string) => boolean {
const wordsArr = words.split(" ");
return function(str) {
return wordsArr.indexOf(str) >= 0;
export const isES2015ReservedWord = (word: string): boolean => {
return word === "enum" || word === "await";
};

const reservedWordsStrict = new Set([
"implements",
"interface",
"let",
"package",
"private",
"protected",
"public",
"static",
"yield",
]);
export function isStrictReservedWord(word: string): boolean {
return reservedWordsStrict.has(word);
}

// Reserved word lists for various dialects of the language
export function isStrictBindReservedWord(word: string): boolean {
return word === "eval" || word === "arguments";
}

export const reservedWords = {
"6": makePredicate("enum await"),
strict: makePredicate(
"implements interface let package private protected public static yield",
),
strictBind: makePredicate("eval arguments"),
};
const keywords = new Set([
"break",
"case",
"catch",
"continue",
"debugger",
"default",
"do",
"else",
"finally",
"for",
"function",
"if",
"return",
"switch",
"throw",
"try",
"var",
"while",
"with",
"null",
"true",
"false",
"instanceof",
"typeof",
"void",
"delete",
"new",
"in",
"this",
"let",
"const",
"class",
"extends",
"export",
"import",
"yield",
"super",
]);

// And the keywords

export const isKeyword = makePredicate(
"break case catch continue debugger default do else finally for function if return switch throw try var while with null true false instanceof typeof void delete new in this let const class extends export import yield super",
);
export function isKeyword(word: string): boolean {
return keywords.has(word);
}

// ## Character categories

@ -64,7 +108,7 @@ const astralIdentifierCodes = [509,0,227,0,150,4,294,9,1368,2,2,1,6,3,41,2,5,0,1
// rare.
function isInAstralSet(code: number, set: $ReadOnlyArray<number>): boolean {
let pos = 0x10000;
for (let i = 0; i < set.length; i += 2) {
for (let i = 0, length = set.length; i < length; i += 2) {
pos += set[i];
if (pos > code) return false;

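In util/identifier.js the makePredicate helpers built from space-separated strings are replaced by Set lookups and direct comparisons; Set#has is a constant-time membership test, whereas indexOf over the split array scans linearly. A minimal before/after sketch (word lists truncated for illustration):

// Before (sketch): build a predicate by splitting a space-separated list.
function makePredicate(words) {
  const wordsArr = words.split(" ");
  return str => wordsArr.indexOf(str) >= 0;
}
const isKeywordOld = makePredicate("break case catch"); // truncated list

// After (sketch): a Set plus a small named function.
const keywordSet = new Set(["break", "case", "catch"]); // truncated list
function isKeywordNew(word) {
  return keywordSet.has(word);
}

isKeywordOld("case"); // true, via a linear indexOf scan
isKeywordNew("case"); // true, via a hash lookup
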
@ -4,7 +4,7 @@ import * as charCodes from "charcodes";

// Matches a whole line break (where CRLF is considered a single
// line break). Used to count lines.
export const lineBreak = /\r\n?|\n|\u2028|\u2029/;
export const lineBreak = /\r\n?|[\n\u2028\u2029]/;
export const lineBreakG = new RegExp(lineBreak.source, "g");

// https://tc39.github.io/ecma262/#sec-line-terminators

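The rewritten lineBreak regex folds the three single-character alternatives into one character class; /\r\n?|[\n\u2028\u2029]/ matches exactly the same inputs as /\r\n?|\n|\u2028|\u2029/, and CRLF is still consumed as a single break. A quick check of that equivalence:

const oldLineBreak = /\r\n?|\n|\u2028|\u2029/;
const newLineBreak = /\r\n?|[\n\u2028\u2029]/;

// CRLF still counts as a single break when splitting into lines.
const src = "a\r\nb\nc\u2028d";
src.split(new RegExp(newLineBreak.source, "g")).length; // => 4 lines

["\r\n", "\n", "\r", "\u2028", "\u2029", "x"].every(
  s => oldLineBreak.test(s) === newLineBreak.test(s),
); // => true
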
@ -1,4 +1,4 @@
{
"sourceType": "module",
"throws": "Unexpected token, expected \"function\" (1:21)"
"throws": "Unexpected token, expected \"=>\" (1:31)"
}

@ -126,7 +126,7 @@ function ppJSON(v) {
}

function addPath(str, pt) {
if (str.charAt(str.length - 1) == ")") {
if (str.charAt(str.length - 1) === ")") {
return str.slice(0, str.length - 1) + "/" + pt + ")";
} else {
return str + " (" + pt + ")";

packages/babel-parser/test/unit/util/identifier.js (new file, 18 lines)
@ -0,0 +1,18 @@
import { isKeyword } from "../../../src/util/identifier";

describe("identifier", () => {
describe("isKeyword", () => {
it("break is a keyword", () => {
expect(isKeyword("break")).toBe(true);
});
it("let is a keyword", () => {
expect(isKeyword("let")).toBe(true);
});
it("super is a keyword", () => {
expect(isKeyword("super")).toBe(true);
});
it("abc is not a keyword", () => {
expect(isKeyword("abc")).toBe(false);
});
});
});

@ -26,7 +26,6 @@ decorators/migrated_0003.js
decorators/migrated_0007.js
private_class_properties/multiple.js
private_class_properties/super.js
types/annotations/void_is_reserved_param.js
types/member/reserved_words.js
types/parameter_defaults/migrated_0032.js
class_method_kinds/polymorphic_getter.js