diff --git a/acorn.js b/acorn.js
index e9374e008b..ebd9ed4d18 100644
--- a/acorn.js
+++ b/acorn.js
@@ -30,10 +30,8 @@
   exports.parse = function(inpt, opts) {
     input = String(inpt); inputLen = input.length;
-    options = opts || {};
-    for (var opt in defaultOptions) if (!options.hasOwnProperty(opt))
-      options[opt] = defaultOptions[opt];
-    sourceFile = options.sourceFile || null;
+    setOptions(opts);
+    initTokenState();
     return parseTopLevel(options.program);
   };
@@ -90,6 +88,13 @@
     sourceFile: null
   };
+  function setOptions(opts) {
+    options = opts || {};
+    for (var opt in defaultOptions) if (!options.hasOwnProperty(opt))
+      options[opt] = defaultOptions[opt];
+    sourceFile = options.sourceFile || null;
+  }
+
   // The `getLineInfo` function is mostly useful when the
   // `locations` option is off (for performance reasons) and you
   // want to find the line/column position for a given character
@@ -109,9 +114,41 @@
   };
   // Acorn is organized as a tokenizer and a recursive-descent parser.
-  // Both use (closure-)global variables to keep their state and
-  // communicate. We already saw the `options`, `input`, and
-  // `inputLen` variables above (set in `parse`).
+  // The `tokenize` export provides an interface to the tokenizer.
+  // Because the tokenizer is optimized for being efficiently used by
+  // the Acorn parser itself, this interface is somewhat crude and not
+  // very modular. Performing another parse or call to `tokenize` will
+  // reset the internal state, and invalidate existing tokenizers.
+
+  exports.tokenize = function(inpt, opts) {
+    input = String(inpt); inputLen = input.length;
+    setOptions(opts);
+    initTokenState();
+
+    var t = {};
+    function getToken(forceRegexp) {
+      readToken(forceRegexp);
+      t.start = tokStart; t.end = tokEnd;
+      t.startLoc = tokStartLoc; t.endLoc = tokEndLoc;
+      t.type = tokType; t.value = tokVal;
+      return t;
+    }
+    getToken.jumpTo = function(pos) {
+      tokPos = pos;
+      if (options.locations) {
+        tokCurLine = tokLineStart = 0;
+        tokLineStartNext = nextLineStart();
+      }
+      var ch = input.charAt(pos - 1);
+      tokRegexpAllowed = !ch || /[\[\{\(,;:?\/*=+\-~!|&%^<>]/.test(ch) ||
+        /[enwfd]/.test(ch) && /\b(keywords|case|else|return|throw|new|in|(instance|type)of|delete|void)$/.test(input.slice(pos - 9, pos + 1));
+      skipSpace();
+    };
+    return getToken;
+  };
+
+  // State is kept in (closure-)global variables. We already saw the
+  // `options`, `input`, and `inputLen` variables above.
   // The current position of the tokenizer in the input.
@@ -270,6 +307,14 @@
   var _bin7 = {binop: 7, beforeExpr: true}, _bin8 = {binop: 8, beforeExpr: true};
   var _bin10 = {binop: 10, beforeExpr: true};
+  // Provide access to the token types for external users of the
+  // tokenizer.
+
+  exports.tokTypes = {bracketL: _bracketL, bracketR: _bracketR, braceL: _braceL, braceR: _braceR,
+                      parenL: _parenL, parenR: _parenR, comma: _comma, semi: _semi, colon: _colon,
+                      dot: _dot, question: _question, slash: _slash, eq: _eq};
+  for (var kw in keywordTypes) exports.tokTypes[kw] = keywordTypes[kw];
+
   // This is a trick taken from Esprima. It turns out that, on
   // non-Chrome browsers, to check whether a string is in a set, a
   // predicate containing a big ugly `switch` statement is faster than
@@ -996,7 +1041,6 @@
   // to its body instead of creating a new node.
  function parseTopLevel(program) {
-    initTokenState();
     lastStart = lastEnd = tokPos;
     if (options.locations) lastEndLoc = curLineLoc();
     inFunction = strict = null;
diff --git a/index.html b/index.html
index af26d2a798..5b682d79bd 100644
--- a/index.html
+++ b/index.html
@@ -21,10 +21,8 @@ API, with the caveat that the SpiderMonkey-specific syntax
exports.parse = function(inpt, opts) {
input = String(inpt); inputLen = input.length;
- options = opts || {};
- for (var opt in defaultOptions) if (!options.hasOwnProperty(opt))
- options[opt] = defaultOptions[opt];
- sourceFile = options.sourceFile || null;
+ setOptions(opts);
+ initTokenState();
return parseTopLevel(options.program);
};
A second optional argument can be given to further configure the parser process. These options are recognized:
var defaultOptions = exports.defaultOptions = {
ecmaVersion indicates the ECMAScript version to parse. Must
@@ -55,7 +53,14 @@ passing the tree produced by parsing the first file as
toplevel forms of the parsed file to the Program (top) node
of an existing parse tree.
program: null,
When location is on, you can pass this to record the source
file in every node's loc object.
sourceFile: null
  };
+
+ function setOptions(opts) {
+ options = opts || {};
+ for (var opt in defaultOptions) if (!options.hasOwnProperty(opt))
+ options[opt] = defaultOptions[opt];
+ sourceFile = options.sourceFile || null;
+ }
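For reference, a minimal usage sketch of how the merged options behave from the caller's side (illustrative only, not part of the patch; assumes a CommonJS-style require of this file):

    var acorn = require("./acorn");
    // Options that are not passed fall back to defaultOptions via setOptions.
    var ast = acorn.parse("var answer = 42;", {ecmaVersion: 5, locations: true});
    console.log(ast.body[0].type);            // "VariableDeclaration"
    console.log(ast.body[0].loc.start.line);  // 1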
The getLineInfo function is mostly useful when the
locations option is off (for performance reasons) and you
want to find the line/column position for a given character
offset. input should be the code string that the offset refers
@@ -70,27 +75,55 @@ into.
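A small sketch of getLineInfo from the outside (illustrative only): given the raw input string and a character offset, it returns an object with line and column fields.

    var code = "var a;\nvar b;";
    var pos = acorn.getLineInfo(code, code.indexOf("b"));
    console.log(pos);   // { line: 2, column: 4 } (column is zero-based)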
Acorn is organized as a tokenizer and a recursive-descent parser.
-Both use (closure-)global variables to keep their state and
-communicate. We already saw the options, input, and
-inputLen variables above (set in parse).
+The tokenize export provides an interface to the tokenizer.
+Because the tokenizer is optimized for being efficiently used by
+the Acorn parser itself, this interface is somewhat crude and not
+very modular. Performing another parse or call to tokenize will
+reset the internal state, and invalidate existing tokenizers.
exports.tokenize = function(inpt, opts) {
+ input = String(inpt); inputLen = input.length;
+ setOptions(opts);
+ initTokenState();
+
+ var t = {};
+ function getToken(forceRegexp) {
+ readToken(forceRegexp);
+ t.start = tokStart; t.end = tokEnd;
+ t.startLoc = tokStartLoc; t.endLoc = tokEndLoc;
+ t.type = tokType; t.value = tokVal;
+ return t;
+ }
+ getToken.jumpTo = function(pos) {
+ tokPos = pos;
+ if (options.locations) {
+ tokCurLine = tokLineStart = 0;
+ tokLineStartNext = nextLineStart();
+ }
+ var ch = input.charAt(pos - 1);
+ tokRegexpAllowed = !ch || /[\[\{\(,;:?\/*=+\-~!|&%^<>]/.test(ch) ||
+ /[enwfd]/.test(ch) && /\b(keywords|case|else|return|throw|new|in|(instance|type)of|delete|void)$/.test(input.slice(pos - 9, pos + 1));
+ skipSpace();
+ };
+ return getToken;
+ };
+State is kept in (closure-)global variables. We already saw the
+options, input, and inputLen variables above.
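To make the crude interface concrete, a small usage sketch (illustrative only, not part of the patch): the returned getToken function mutates and returns one shared token object, so callers must copy any fields they want to keep across calls.

    var getToken = acorn.tokenize("x = /a/.test(y)", {locations: false});
    for (var tok = getToken(); tok.type.type !== "eof"; tok = getToken()) {
      console.log(tok.start, tok.end, tok.value);
    }
    // getToken.jumpTo(0) would rewind the tokenizer to offset 0 and
    // re-derive whether a regexp is allowed at that point.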
The current position of the tokenizer in the input.
var tokPos;
The start and end offsets of the current token.
var tokStart, tokEnd;
When options.locations is true, these hold objects
containing the token's start and end line/column pairs.
var tokStartLoc, tokEndLoc;
The type and value of the current token. Token types are objects,
named by variables against which they can be compared, and
holding properties that describe them (indicating, for example,
the precedence of an infix operator, and the original name of a
keyword token). The kind of value that's held in tokVal depends
on the type of the token. For literals, it is the literal value,
for operators, the operator name, and so on.
var tokType, tokVal;
Internal state for the tokenizer. To distinguish between division
operators and regular expressions, it remembers whether the last
token was one that is allowed to be followed by an expression.
(If it is, a slash is probably a regexp, if it isn't it's a
division operator. See the parseStatement function for a
caveat.)
var tokRegexpAllowed;
When options.locations is true, these are used to keep
track of the current line, and know when a new line has been
entered. See the curLineLoc function.
var tokCurLine, tokLineStart, tokLineStartNext;
These store the position of the previous token, which is useful
when finishing a node and assigning its end position.
var lastStart, lastEnd, lastEndLoc;
This is the parser's state. inFunction is used to reject
return statements outside of functions, labels to verify that
break and continue have somewhere to jump to, and strict
indicates whether strict mode is on.
var inFunction, labels, strict;
This function is used to raise exceptions on parse errors. It
takes an offset integer (into the current input) to indicate
the location of the error, attaches the position to the end
of the error message, and then raises a SyntaxError with that
@@ -100,12 +133,12 @@ message.
}
The assignment of fine-grained, information-carrying type objects
allows the tokenizer to store the information it has about a
token in a way that is very cheap for the parser to look up.
All token type variables start with an underscore, to make them
easy to recognize.
These are the general types. The type property is only used to
make them recognizeable when debugging.
var _num = {type: "num"}, _regexp = {type: "regexp"}, _string = {type: "string"};
var _name = {type: "name"}, _eof = {type: "eof"};
Keyword tokens. The keyword property (also used in keyword-like
operators) indicates that the token originated from an
identifier-like word, which is used when parsing property names.
var _this = {keyword: "this"};
The keywords that denote values.
var _null = {keyword: "null", atomValue: null}, _true = {keyword: "true", atomValue: true};
var _false = {keyword: "false", atomValue: false};
Some keywords are treated as regular operators. in sometimes
(when parsing for) needs to be tested against specifically, so
we assign a variable name to it for quick comparing.
var _in = {keyword: "in", binop: 7, beforeExpr: true};
Map keyword names to token types.
var keywordTypes = {"break": _break, "case": _case, "catch": _catch,
"continue": _continue, "debugger": _debugger, "default": _default,
"do": _do, "else": _else, "finally": _finally, "for": _for,
"function": _function, "if": _if, "return": _return, "switch": _switch,
@@ -135,10 +168,10 @@ we assign a variable name to it for quick comparing.
    "delete": {keyword: "delete", prefix: true, beforeExpr: true}};
Punctuation token types. Again, the type property is purely for debugging.
var _bracketL = {type: "[", beforeExpr: true}, _bracketR = {type: "]"}, _braceL = {type: "{", beforeExpr: true};
var _braceR = {type: "}"}, _parenL = {type: "(", beforeExpr: true}, _parenR = {type: ")"};
var _comma = {type: ",", beforeExpr: true}, _semi = {type: ";", beforeExpr: true};
var _colon = {type: ":", beforeExpr: true}, _dot = {type: "."}, _question = {type: "?", beforeExpr: true};
Operators. These carry several kinds of properties to help the parser use them properly (the presence of these properties is what categorizes them as operators).
@@ -159,7 +192,11 @@ in AssignmentExpression nodes.
var _bin10 = {binop: 10, beforeExpr: true};
Provide access to the token types for external users of the
tokenizer.
exports.tokTypes = {bracketL: _bracketL, bracketR: _bracketR, braceL: _braceL, braceR: _braceR,
                    parenL: _parenL, parenR: _parenR, comma: _comma, semi: _semi, colon: _colon,
                    dot: _dot, question: _question, slash: _slash, eq: _eq};
for (var kw in keywordTypes) exports.tokTypes[kw] = keywordTypes[kw];
This is a trick taken from Esprima. It turns out that, on
non-Chrome browsers, to check whether a string is in a set, a
predicate containing a big ugly switch statement is faster than
a regular expression, and on Chrome the two are about on par.
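A rough illustration of the two approaches being compared (hypothetical helper names, not the generated code itself):

    function inSetSwitch(str) {
      switch (str) {case "new": case "typeof": case "delete": return true; default: return false;}
    }
    function inSetRegexp(str) { return /^(?:new|typeof|delete)$/.test(str); }
    inSetSwitch("typeof");   // true
    inSetRegexp("foo");      // false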
@@ -182,7 +219,7 @@ predicate from a space-separated string of words.
}
When there are more than three length categories, an outer
switch first dispatches on the lengths, to save on comparisons.
if (cats.length > 3) {
cats.sort(function(a, b) {return b.length - a.length;});
f += "switch(str.length){";
@@ -191,25 +228,25 @@ switch first dispatches on the lengths, to save on comparisons.
f += "case " + cat[0].length + ":";
compareTo(cat);
}
      f += "}";
Otherwise, simply generate a flat switch statement.
    } else {
compareTo(words);
}
return new Function("str", f);
}
The ECMAScript 3 reserved word list.
var isReservedWord3 = makePredicate("abstract boolean byte char class double enum export extends final float goto implements import int interface long native package private protected public short static super synchronized throws transient volatile");
ECMAScript 5 reserved words.
var isReservedWord5 = makePredicate("class enum extends super const export import");
The additional reserved words in strict mode.
var isStrictReservedWord = makePredicate("implements interface let package private protected public static yield");
The forbidden variable names in strict mode.
var isStrictBadIdWord = makePredicate("eval arguments");
And the keywords.
var isKeyword = makePredicate("break case catch continue debugger default do else finally for function if return switch throw try var while with null true false instanceof typeof void delete new in this");
Big ugly regular expressions that match characters in the
whitespace, identifier, and identifier-start categories. These
are only applied when a character is found to actually have a
code point above 128.
var nonASCIIwhitespace = /[\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff]/;
var nonASCIIidentifierStartChars = "\xaa\xb5\xba\xc0-\xd6\xd8-\xf6\xf8-\u02c1\u02c6-\u02d1\u02e0-\u02e4\u02ec\u02ee\u0370-\u0374\u0376\u0377\u037a-\u037d\u0386\u0388-\u038a\u038c\u038e-\u03a1\u03a3-\u03f5\u03f7-\u0481\u048a-\u0527\u0531-\u0556\u0559\u0561-\u0587\u05d0-\u05ea\u05f0-\u05f2\u0620-\u064a\u066e\u066f\u0671-\u06d3\u06d5\u06e5\u06e6\u06ee\u06ef\u06fa-\u06fc\u06ff\u0710\u0712-\u072f\u074d-\u07a5\u07b1\u07ca-\u07ea\u07f4\u07f5\u07fa\u0800-\u0815\u081a\u0824\u0828\u0840-\u0858\u08a0\u08a2-\u08ac\u0904-\u0939\u093d\u0950\u0958-\u0961\u0971-\u0977\u0979-\u097f\u0985-\u098c\u098f\u0990\u0993-\u09a8\u09aa-\u09b0\u09b2\u09b6-\u09b9\u09bd\u09ce\u09dc\u09dd\u09df-\u09e1\u09f0\u09f1\u0a05-\u0a0a\u0a0f\u0a10\u0a13-\u0a28\u0a2a-\u0a30\u0a32\u0a33\u0a35\u0a36\u0a38\u0a39\u0a59-\u0a5c\u0a5e\u0a72-\u0a74\u0a85-\u0a8d\u0a8f-\u0a91\u0a93-\u0aa8\u0aaa-\u0ab0\u0ab2\u0ab3\u0ab5-\u0ab9\u0abd\u0ad0\u0ae0\u0ae1\u0b05-\u0b0c\u0b0f\u0b10\u0b13-\u0b28\u0b2a-\u0b30\u0b32\u0b33\u0b35-\u0b39\u0b3d\u0b5c\u0b5d\u0b5f-\u0b61\u0b71\u0b83\u0b85-\u0b8a\u0b8e-\u0b90\u0b92-\u0b95\u0b99\u0b9a\u0b9c\u0b9e\u0b9f\u0ba3\u0ba4\u0ba8-\u0baa\u0bae-\u0bb9\u0bd0\u0c05-\u0c0c\u0c0e-\u0c10\u0c12-\u0c28\u0c2a-\u0c33\u0c35-\u0c39\u0c3d\u0c58\u0c59\u0c60\u0c61\u0c85-\u0c8c\u0c8e-\u0c90\u0c92-\u0ca8\u0caa-\u0cb3\u0cb5-\u0cb9\u0cbd\u0cde\u0ce0\u0ce1\u0cf1\u0cf2\u0d05-\u0d0c\u0d0e-\u0d10\u0d12-\u0d3a\u0d3d\u0d4e\u0d60\u0d61\u0d7a-\u0d7f\u0d85-\u0d96\u0d9a-\u0db1\u0db3-\u0dbb\u0dbd\u0dc0-\u0dc6\u0e01-\u0e30\u0e32\u0e33\u0e40-\u0e46\u0e81\u0e82\u0e84\u0e87\u0e88\u0e8a\u0e8d\u0e94-\u0e97\u0e99-\u0e9f\u0ea1-\u0ea3\u0ea5\u0ea7\u0eaa\u0eab\u0ead-\u0eb0\u0eb2\u0eb3\u0ebd\u0ec0-\u0ec4\u0ec6\u0edc-\u0edf\u0f00\u0f40-\u0f47\u0f49-\u0f6c\u0f88-\u0f8c\u1000-\u102a\u103f\u1050-\u1055\u105a-\u105d\u1061\u1065\u1066\u106e-\u1070\u1075-\u1081\u108e\u10a0-\u10c5\u10c7\u10cd\u10d0-\u10fa\u10fc-\u1248\u124a-\u124d\u1250-\u1256\u1258\u125a-\u125d\u1260-\u1288\u128a-\u128d\u1290-\u12b0\u12b2-\u12b5\u12b8-\u12be\u12c0\u12c2-\u12c5\u12c8-\u12d6\u12d8-\u1310\u1312-\u1315\u1318-\u135a\u1380-\u138f\u13a0-\u13f4\u1401-\u166c\u166f-\u167f\u1681-\u169a\u16a0-\u16ea\u16ee-\u16f0\u1700-\u170c\u170e-\u1711\u1720-\u1731\u1740-\u1751\u1760-\u176c\u176e-\u1770\u1780-\u17b3\u17d7\u17dc\u1820-\u1877\u1880-\u18a8\u18aa\u18b0-\u18f5\u1900-\u191c\u1950-\u196d\u1970-\u1974\u1980-\u19ab\u19c1-\u19c7\u1a00-\u1a16\u1a20-\u1a54\u1aa7\u1b05-\u1b33\u1b45-\u1b4b\u1b83-\u1ba0\u1bae\u1baf\u1bba-\u1be5\u1c00-\u1c23\u1c4d-\u1c4f\u1c5a-\u1c7d\u1ce9-\u1cec\u1cee-\u1cf1\u1cf5\u1cf6\u1d00-\u1dbf\u1e00-\u1f15\u1f18-\u1f1d\u1f20-\u1f45\u1f48-\u1f4d\u1f50-\u1f57\u1f59\u1f5b\u1f5d\u1f5f-\u1f7d\u1f80-\u1fb4\u1fb6-\u1fbc\u1fbe\u1fc2-\u1fc4\u1fc6-\u1fcc\u1fd0-\u1fd3\u1fd6-\u1fdb\u1fe0-\u1fec\u1ff2-\u1ff4\u1ff6-\u1ffc\u2071\u207f\u2090-\u209c\u2102\u2107\u210a-\u2113\u2115\u2119-\u211d\u2124\u2126\u2128\u212a-\u212d\u212f-\u2139\u213c-\u213f\u2145-\u2149\u214e\u2160-\u2188\u2c00-\u2c2e\u2c30-\u2c5e\u2c60-\u2ce4\u2ceb-\u2cee\u2cf2\u2cf3\u2d00-\u2d25\u2d27\u2d2d\u2d30-\u2d67\u2d6f\u2d80-\u2d96\u2da0-\u2da6\u2da8-\u2dae\u2db0-\u2db6\u2db8-\u2dbe\u2dc0-\u2dc6\u2dc8-\u2dce\u2dd0-\u2dd6\u2dd8-\u2dde\u2e2f\u3005-\u3007\u3021-\u3029\u3031-\u3035\u3038-\u303c\u3041-\u3096\u309d-\u309f\u30a1-\u30fa\u30fc-\u30ff\u3105-\u312d\u3131-\u318e\u31a0-\u31ba\u31f0-\u31ff\u3400-\u4db5\u4e00-\u9fcc\ua000-\ua48c\ua4d0-\ua4fd\ua500-\ua60c\ua610-\ua61f\ua62a\ua62b\ua640-\ua66e\ua67f-\ua697\ua6a0-\ua6ef\ua717-\ua71f\ua722-\ua788\ua78b-\ua78e\ua790-\ua793\ua7a0-\ua7aa\ua7f8-\ua801\ua803-\ua805\ua807-\ua80a\ua80c-\ua822\ua84
0-\ua873\ua882-\ua8b3\ua8f2-\ua8f7\ua8fb\ua90a-\ua925\ua930-\ua946\ua960-\ua97c\ua984-\ua9b2\ua9cf\uaa00-\uaa28\uaa40-\uaa42\uaa44-\uaa4b\uaa60-\uaa76\uaa7a\uaa80-\uaaaf\uaab1\uaab5\uaab6\uaab9-\uaabd\uaac0\uaac2\uaadb-\uaadd\uaae0-\uaaea\uaaf2-\uaaf4\uab01-\uab06\uab09-\uab0e\uab11-\uab16\uab20-\uab26\uab28-\uab2e\uabc0-\uabe2\uac00-\ud7a3\ud7b0-\ud7c6\ud7cb-\ud7fb\uf900-\ufa6d\ufa70-\ufad9\ufb00-\ufb06\ufb13-\ufb17\ufb1d\ufb1f-\ufb28\ufb2a-\ufb36\ufb38-\ufb3c\ufb3e\ufb40\ufb41\ufb43\ufb44\ufb46-\ufbb1\ufbd3-\ufd3d\ufd50-\ufd8f\ufd92-\ufdc7\ufdf0-\ufdfb\ufe70-\ufe74\ufe76-\ufefc\uff21-\uff3a\uff41-\uff5a\uff66-\uffbe\uffc2-\uffc7\uffca-\uffcf\uffd2-\uffd7\uffda-\uffdc";
var nonASCIIidentifierChars = "\u0371-\u0374\u0483-\u0487\u0591-\u05bd\u05bf\u05c1\u05c2\u05c4\u05c5\u05c7\u0610-\u061a\u0620-\u0649\u0672-\u06d3\u06e7-\u06e8\u06fb-\u06fc\u0730-\u074a\u0800-\u0814\u081b-\u0823\u0825-\u0827\u0829-\u082d\u0840-\u0857\u08e4-\u08fe\u0900-\u0903\u093a-\u093c\u093e-\u094f\u0951-\u0957\u0962-\u0963\u0966-\u096f\u0981-\u0983\u09bc\u09be-\u09c4\u09c7\u09c8\u09d7\u09df-\u09e0\u0a01-\u0a03\u0a3c\u0a3e-\u0a42\u0a47\u0a48\u0a4b-\u0a4d\u0a51\u0a66-\u0a71\u0a75\u0a81-\u0a83\u0abc\u0abe-\u0ac5\u0ac7-\u0ac9\u0acb-\u0acd\u0ae2-\u0ae3\u0ae6-\u0aef\u0b01-\u0b03\u0b3c\u0b3e-\u0b44\u0b47\u0b48\u0b4b-\u0b4d\u0b56\u0b57\u0b5f-\u0b60\u0b66-\u0b6f\u0b82\u0bbe-\u0bc2\u0bc6-\u0bc8\u0bca-\u0bcd\u0bd7\u0be6-\u0bef\u0c01-\u0c03\u0c46-\u0c48\u0c4a-\u0c4d\u0c55\u0c56\u0c62-\u0c63\u0c66-\u0c6f\u0c82\u0c83\u0cbc\u0cbe-\u0cc4\u0cc6-\u0cc8\u0cca-\u0ccd\u0cd5\u0cd6\u0ce2-\u0ce3\u0ce6-\u0cef\u0d02\u0d03\u0d46-\u0d48\u0d57\u0d62-\u0d63\u0d66-\u0d6f\u0d82\u0d83\u0dca\u0dcf-\u0dd4\u0dd6\u0dd8-\u0ddf\u0df2\u0df3\u0e34-\u0e3a\u0e40-\u0e45\u0e50-\u0e59\u0eb4-\u0eb9\u0ec8-\u0ecd\u0ed0-\u0ed9\u0f18\u0f19\u0f20-\u0f29\u0f35\u0f37\u0f39\u0f41-\u0f47\u0f71-\u0f84\u0f86-\u0f87\u0f8d-\u0f97\u0f99-\u0fbc\u0fc6\u1000-\u1029\u1040-\u1049\u1067-\u106d\u1071-\u1074\u1082-\u108d\u108f-\u109d\u135d-\u135f\u170e-\u1710\u1720-\u1730\u1740-\u1750\u1772\u1773\u1780-\u17b2\u17dd\u17e0-\u17e9\u180b-\u180d\u1810-\u1819\u1920-\u192b\u1930-\u193b\u1951-\u196d\u19b0-\u19c0\u19c8-\u19c9\u19d0-\u19d9\u1a00-\u1a15\u1a20-\u1a53\u1a60-\u1a7c\u1a7f-\u1a89\u1a90-\u1a99\u1b46-\u1b4b\u1b50-\u1b59\u1b6b-\u1b73\u1bb0-\u1bb9\u1be6-\u1bf3\u1c00-\u1c22\u1c40-\u1c49\u1c5b-\u1c7d\u1cd0-\u1cd2\u1d00-\u1dbe\u1e01-\u1f15\u200c\u200d\u203f\u2040\u2054\u20d0-\u20dc\u20e1\u20e5-\u20f0\u2d81-\u2d96\u2de0-\u2dff\u3021-\u3028\u3099\u309a\ua640-\ua66d\ua674-\ua67d\ua69f\ua6f0-\ua6f1\ua7f8-\ua800\ua806\ua80b\ua823-\ua827\ua880-\ua881\ua8b4-\ua8c4\ua8d0-\ua8d9\ua8f3-\ua8f7\ua900-\ua909\ua926-\ua92d\ua930-\ua945\ua980-\ua983\ua9b3-\ua9c0\uaa00-\uaa27\uaa40-\uaa41\uaa4c-\uaa4d\uaa50-\uaa59\uaa7b\uaae0-\uaae9\uaaf2-\uaaf3\uabc0-\uabe1\uabec\uabed\uabf0-\uabf9\ufb20-\ufb28\ufe00-\ufe0f\ufe20-\ufe26\ufe33\ufe34\ufe4d-\ufe4f\uff10-\uff19\uff3f";
var nonASCIIidentifierStart = new RegExp("[" + nonASCIIidentifierStartChars + "]");
var nonASCIIidentifier = new RegExp("[" + nonASCIIidentifierStartChars + nonASCIIidentifierChars + "]");
Whether a single character denotes a newline.
var newline = /[\n\r\u2028\u2029]/;
Matches a whole line break (where CRLF is considered a single
line break). Used to count lines.
var lineBreak = /\r\n|[\n\r\u2028\u2029]/g;
Test whether a given character code starts an identifier.
function isIdentifierStart(code) {
if (code < 65) return code === 36;
if (code < 91) return true;
if (code < 97) return code === 95;
if (code < 123)return true;
return code >= 0xaa && nonASCIIidentifierStart.test(String.fromCharCode(code));
}
Test whether a given character is part of an identifier.
function isIdentifierChar(code) {
if (code < 48) return code === 36;
if (code < 58) return true;
if (code < 65) return false;
@@ -217,7 +254,7 @@ line break). Used to count lines.
}
These are used when options.locations is on, in order to track
the current line number and start of line offset, in order to set
tokStartLoc and tokEndLoc.
function nextLineStart() {
lineBreak.lastIndex = tokLineStart;
@@ -237,13 +274,13 @@ the current line number and start of line offset, in order to set
tokLineStartNext = nextLineStart();
}
return new line_loc_t();
}
Reset the token state. Used at the start of a parse.
function initTokenState() {
tokCurLine = 1;
tokPos = tokLineStart = 0;
tokLineStartNext = nextLineStart();
tokRegexpAllowed = true;
skipSpace();
}
Called at the end of every token. Sets tokEnd, tokVal, and
tokRegexpAllowed, and skips the space after the token, so that
the next one's tokStart will point at the right position.
function finishToken(type, val) {
tokEnd = tokPos;
@@ -277,7 +314,7 @@ the next one's tokStart will point at the right position.
if (options.onComment)
options.onComment(false, input.slice(start + 2, tokPos - 1), start, tokPos,
startLoc, options.locations && curLineLoc());
}
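The onComment hook invoked above can be used from the outside roughly like this (a sketch only; the callback receives a block flag, the comment text, and its start and end offsets, plus location information when locations is on):

    var comments = [];
    acorn.parse("// header\nvar x; /* note */", {
      onComment: function(block, text, start, end) {
        comments.push({block: block, text: text, start: start, end: end});
      }
    });
    // comments[0].block === false   (the line comment)
    // comments[1].block === true    (the block comment)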
Called at the start of the parse and after every token. Skips
whitespace and comments.
function skipSpace() {
while (tokPos < inputLen) {
var ch = input.charCodeAt(tokPos);
@@ -296,7 +333,7 @@ whitespace and comments, and.
}
This is the function that is called to fetch the next token. It
is somewhat obscure, because it works in character codes rather
than characters, and because operator parsing has been inlined
into it.
@@ -364,9 +401,9 @@ into it.
}
function getTokenFromCode(code) {
switch(code) {
The interpretation of a dot depends on whether it is followed
by a digit.
case 46: // '.'
return readToken_dot(code);
Punctuation tokens.
case 40: ++tokPos; return finishToken(_parenL);
case 41: ++tokPos; return finishToken(_parenR);
case 59: ++tokPos; return finishToken(_semi);
case 44: ++tokPos; return finishToken(_comma);
@@ -375,12 +412,12 @@ by a digit.
case 63: ++tokPos; return finishToken(_question);
'0x' is a hexadecimal number.
case 48: // '0'
var next = input.charCodeAt(tokPos+1);
if (next === 120 || next === 88) return readHexNumber();
Anything else beginning with a digit is an integer, octal
number, or float.
case 49: case 50: case 51: case 52: case 53: case 54: case 55: case 56: case 57: // 1-9
return readNumber(String.fromCharCode(code));
Quotes produce strings.
case 34: case 39: // '"', "'"
return readString(code);
Operators are parsed inline in tiny state machines. '=' (61) is
often referred to. finishOp simply skips the amount of
characters it is given as second argument, and returns a token
of the type given by its first argument.
case 47: // '/'
@@ -417,12 +454,12 @@ of the type given by its first argument.
var code = input.charCodeAt(tokPos);
Identifier or keyword. '\uXXXX' sequences are allowed in
identifiers, so '\' also dispatches to that.
if (isIdentifierStart(code) || code === 92 /* '\' */) return readWord();
var tok = getTokenFromCode(code);
if (tok === false) {
If we are here, we either found a non-ASCII identifier
character, or something that's entirely disallowed.
var ch = String.fromCharCode(code);
if (ch === "\\" || nonASCIIidentifierStart.test(ch)) return readWord();
raise(tokPos, "Unexpected character '" + ch + "'");
@@ -434,7 +471,7 @@ character, or something that's entirely disallowed.
}
Parse a regular expression. Some context-awareness is necessary,
since a '/' inside a '[]' set does not end the expression.
function readRegexp() {
var content = "", escaped, inClass, start = tokPos;
for (;;) {
@@ -450,11 +487,11 @@ since a '/' inside a '[]' set does not end the expression.
++tokPos;
Need to use readWord1 because '\uXXXX' sequences are allowed
here (don't ask).
var mods = readWord1();
if (mods && !/^[gmsiy]*$/.test(mods)) raise(start, "Invalid regexp flag");
return finishToken(_regexp, new RegExp(content, mods));
}
Read an integer in the given radix. Return null if zero digits
were read, the integer value otherwise. When len is given, this
will return null unless the integer has exactly len digits.
function readInt(radix, len) {
var start = tokPos, total = 0;
@@ -479,7 +516,7 @@ will return null unless the integer has exactly len di
if (val == null) raise(tokStart + 2, "Expected hexadecimal number");
if (isIdentifierStart(input.charCodeAt(tokPos))) raise(tokPos, "Identifier directly after number");
return finishToken(_num, val);
}
Read an integer, octal integer, or floating-point number.
function readNumber(ch) {
var start = tokPos, isFloat = ch === ".";
if (!isFloat && readInt(10) == null) raise(start, "Invalid number");
@@ -503,7 +540,7 @@ will return null unless the integer has exactly len di
else if (/[89]/.test(str) || strict) raise(start, "Invalid number");
else val = parseInt(str, 8);
return finishToken(_num, val);
}
Read a string value, interpreting backslash-escapes.
var rs_str = [];
function readString(quote) {
tokPos++;
@@ -549,13 +586,13 @@ will return null unless the integer has exactly len di
++tokPos;
}
}
}
Used to read character escape sequences ('\x', '\u', '\U').
function readHexChar(len) {
var n = readInt(16, len);
if (n === null) raise(tokStart, "Bad character escape sequence");
return n;
}
Used to signal to callers of readWord1 whether the word
contained any escape sequences. This is needed because words with
escape sequences must not be interpreted as keywords.
var containsEsc;
Read an identifier, and return it as a string. Sets containsEsc
to whether the word contained a '\u' escape.
Only builds up the word character-by-character when it actually
contains an escape, as a micro-optimization.
@@ -585,7 +622,7 @@
}
Read an identifier or keyword token. Will check for reserved
words when necessary.
function readWord() {
var word = readWord1();
var type = _name;
@@ -597,7 +634,7 @@ words when necessary. A recursive descent parser operates by defining functions for all + }
A recursive descent parser operates by defining functions for all
syntactic elements, and recursively calling those, each function
advancing the input stream and returning an AST node. Precedence
of constructs (for example, the fact that !x[1] means !(x[1])
@@ -611,19 +648,19 @@ way, it'll receive the node for x[1] already parsed, and wraps
operator precedence, because it is much more compact than using
the technique outlined above, which uses different, nesting
functions to specify precedence, for all of the ten binary
precedence levels that JavaScript defines.
Continue to the next token.
function next() {
lastStart = tokStart;
lastEnd = tokEnd;
lastEndLoc = tokEndLoc;
readToken();
}
Enter strict mode. Re-reads the next token to please pedantic
tests ("use strict"; 010; -- should fail).
function setStrict(strct) {
strict = strct;
tokPos = lastEnd;
skipSpace();
readToken();
}
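As a concrete illustration of the pedantic re-read described above (a behavioural sketch, not part of the patch): once the "use strict" directive has been seen, the already-read octal literal is re-tokenized so it can be rejected.

    acorn.parse('"not strict"; 010;');   // parses; 010 is a legacy octal literal
    acorn.parse('"use strict"; 010;');   // throws SyntaxError: Invalid number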
Start an AST node, attaching a start offset.
function node_t(s) {
this.type = null;
this.start = tokStart;
this.end = null;
@@ -642,7 +679,7 @@ tests ("use strict"; 010; -- should fail).
}
Start a node whose start offset information should be based on
the start of another node. For example, a binary operator node is
only started after its left-hand side has already been parsed.
function startNodeFrom(other) {
var node = new node_t();
@@ -655,7 +692,7 @@ only started after its left-hand side has already been parsed. <
node.range = [other.range[0], 0];
return node;
}
Finish an AST node, adding type and end properties.
function finishNode(node, type) {
node.type = type;
node.end = lastEnd;
if (options.locations)
@@ -663,38 +700,37 @@ only started after its left-hand side has already been parsed. <
if (options.ranges)
node.range[1] = lastEnd;
return node;
}
Test whether a statement node is the string literal "use strict".
function isUseStrict(stmt) {
return options.ecmaVersion >= 5 && stmt.type === "ExpressionStatement" &&
stmt.expression.type === "Literal" && stmt.expression.value === "use strict";
}
Predicate that tests whether the next token is of the given
type, and if yes, consumes it as a side effect.
function eat(type) {
if (tokType === type) {
next();
return true;
}
}
Test whether a semicolon can be inserted at the current position.
function canInsertSemicolon() {
return !options.strictSemicolons &&
(tokType === _eof || tokType === _braceR || newline.test(input.slice(lastEnd, tokStart)));
}
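In other words (a sketch of the observable behaviour, assuming the strictSemicolons option referenced in the code above):

    acorn.parse("a = 1\nb = 2");                            // ok, newline allows insertion
    acorn.parse("a = 1 b = 2");                             // throws: no newline, no semicolon
    acorn.parse("a = 1\nb = 2", {strictSemicolons: true});  // throws: insertion disabled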
Consume a semicolon, or, failing that, see if we are allowed to
pretend that there is a semicolon at this position.
function semicolon() {
if (!eat(_semi) && !canInsertSemicolon()) unexpected();
}
Expect a token of a given type. If found, consume it, otherwise,
raise an unexpected token error.
function expect(type) {
if (tokType === type) next();
else unexpected();
}
Raise an unexpected token error.
function unexpected() {
raise(tokStart, "Unexpected token");
}
Verify that a node is an lval — something that can be assigned
to.
function checkLVal(expr) {
if (expr.type !== "Identifier" && expr.type !== "MemberExpression")
raise(expr.start, "Assigning to rvalue");
if (strict && expr.type === "Identifier" && isStrictBadIdWord(expr.name))
raise(expr.start, "Assigning to " + expr.name + " in strict mode");
}
Parse a program. Initializes the parser, reads any number of
statements, and wraps them in a Program node. Optionally takes a
program argument. If present, the statements will be appended
to its body instead of creating a new node.
function parseTopLevel(program) {
- initTokenState();
lastStart = lastEnd = tokPos;
if (options.locations) lastEndLoc = curLineLoc();
inFunction = strict = null;
@@ -712,7 +748,7 @@ to its body instead of creating a new node.
var loopLabel = {kind: "loop"}, switchLabel = {kind: "switch"};
Parse a single statement.
If expecting a statement and finding a slash operator, parse a
regular expression literal. This is to handle cases like
@@ -721,7 +757,7 @@ does not help.
var starttype = tokType, node = startNode();
Most types of statements are recognized by the keyword they
start with. Many are trivial to parse, some require a bit of
complexity.
switch (starttype) {
case _break: case _continue:
@@ -732,7 +768,7 @@ complexity.
}
Verify that there is an actual destination to break or
continue to.
for (var i = 0; i < labels.length; ++i) {
var lab = labels[i];
if (node.label == null || lab.name === node.label.name) {
@@ -756,7 +792,7 @@ continue to.
return finishNode(node, "DoWhileStatement");
Disambiguating between a for and a for/in loop is
non-trivial. Basically, we have to parse the init var
statement or expression, disallowing the in operator (see
the second parameter to parseExpression), and then check
@@ -792,7 +828,7 @@ a regular for loop.
next();
In return (and break/continue), the keywords with
optional arguments, we eagerly look for a semicolon or the
possibility to insert one.
if (eat(_semi) || canInsertSemicolon()) node.argument = null;
@@ -804,7 +840,7 @@ possibility to insert one.
labels.push(switchLabel);
Statements under must be grouped (by label) in SwitchCase
nodes. cur is used to keep the node that we are currently
adding statements to.
for (var cur, sawDefault; tokType != _braceR;) {
@@ -885,7 +921,7 @@ adding statements to.
return finishNode(node, "EmptyStatement");
If the statement does not start with a statement keyword or a
brace, it's an ExpressionStatement or LabeledStatement. We
simply start parsing an expression, and afterwards, if the next
token is a colon and the expression was a simple
@@ -906,13 +942,13 @@ Identifier node, we switch to interpreting it as a label.
}
Used for constructs like switch and if that insist on
parentheses around their expression.
function parseParenExpression() {
expect(_parenL);
var val = parseExpression();
expect(_parenR);
return val;
}
Parse a semicolon-enclosed block of statements, handling "use
strict" declarations when allowStrict is true (used for
function bodies).
function parseBlock(allowStrict) {
var node = startNode(), first = true, strict = false, oldStrict;
@@ -929,7 +965,7 @@ function bodies).
}
Parse a regular for loop. The disambiguation code in
parseStatement will already have parsed the init statement or
expression.
function parseFor(node, init) {
node.init = init;
@@ -941,14 +977,14 @@ expression.
}
Parse a for/in loop.
function parseForIn(node, init) {
node.left = init;
node.right = parseExpression();
expect(_parenR);
node.body = parseStatement();
labels.pop();
return finishNode(node, "ForInStatement");
}
Parse a list of variable declarations.
function parseVar(node, noIn) {
node.declarations = [];
node.kind = "var";
for (;;) {
@@ -961,11 +997,11 @@ expression.
}
These nest, from the most general expression type at the top to
'atomic', nondivisible expression types at the bottom. Most of
the functions will simply let the function(s) below them parse,
and, if the syntactic construct they handle is present, wrap
the AST node that the inner parser gave them in another node.
Parse a full expression. The arguments are used to forbid comma
sequences (in argument lists, array literals, or object literals)
or the in operator (in for loops initialization expressions).
function parseExpression(noComma, noIn) {
var expr = parseMaybeAssign(noIn);
@@ -976,7 +1012,7 @@ or the in operator (in for loops initalization expressions).
return finishNode(node, "SequenceExpression");
}
return expr;
}
Parse an assignment expression. This includes applications of
operators like +=.
function parseMaybeAssign(noIn) {
var left = parseMaybeConditional(noIn);
if (tokType.isAssign) {
@@ -989,7 +1025,7 @@ operators like +=. Parse a ternary conditional (?:) operator.
function parseMaybeConditional(noIn) {
+ }Parse a ternary conditional (?:) operator.
function parseMaybeConditional(noIn) {
var expr = parseExprOps(noIn);
if (eat(_question)) {
var node = startNodeFrom(expr);
@@ -1000,9 +1036,9 @@ operators like +=.
}
Start the precedence parser.
function parseExprOps(noIn) {
return parseExprOp(parseMaybeUnary(noIn), -1, noIn);
}
Parse binary operators with the operator precedence parsing
algorithm. left is the left-hand side of the operator.
minPrec provides context that allows the function to stop and
defer further parsing to one of its callers when it encounters an
@@ -1020,7 +1056,7 @@ operator that has a lower precedence than the set it is parsing.
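A small example of the resulting precedence handling (illustrative only): parsing 1 + 2 * 3 nests the multiplication under the addition because * binds tighter than +.

    var expr = acorn.parse("1 + 2 * 3").body[0].expression;
    console.log(expr.operator);          // "+"
    console.log(expr.right.operator);    // "*"
    console.log(expr.right.left.value);  // 2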
}
Parse unary operators, both prefix and postfix.
function parseMaybeUnary(noIn) {
if (tokType.prefix) {
var node = startNode(), update = tokType.isUpdate;
node.operator = tokVal;
@@ -1044,7 +1080,7 @@ operator that has a lower precedence than the set it is parsing.
expr = finishNode(node, "UpdateExpression");
}
return expr;
}
Parse call, dot, and []-subscript expressions.
function parseExprSubscripts() {
return parseSubscripts(parseExprAtom());
}
@@ -1068,7 +1104,7 @@ operator that has a lower precedence than the set it is parsing.
node.arguments = parseExprList(_parenR, false);
return parseSubscripts(finishNode(node, "CallExpression"), noCalls);
} else return base;
}
Parse an atomic expression — either a single token that is an
expression, an expression started by a keyword like function or
new, or an expression wrapped in punctuation like (), [],
or {}.
function parseExprAtom() {
@@ -1128,7 +1164,7 @@ or {}.
}
New's precedence is slightly tricky. It must allow its argument
to be a [] or dot subscript expression, but not a call — at
least, not without wrapping it in parentheses. Thus, it uses the
function parseNew() {
var node = startNode();
@@ -1137,7 +1173,7 @@ least, not without wrapping it in parentheses. Thus, it uses the
if (eat(_parenL)) node.arguments = parseExprList(_parenR, false);
else node.arguments = [];
return finishNode(node, "NewExpression");
}
Parse an object literal.
function parseObj() {
var node = startNode(), first = true, sawGetSet = false;
node.properties = [];
next();
@@ -1158,7 +1194,7 @@ least, not without wrapping it in parentheses. Thus, it uses the
prop.key = parsePropertyName();
if (!tokType === _parenL) unexpected();
prop.value = parseFunction(startNode(), false);
} else unexpected();
getters and setters are not allowed to clash — either with
each other or with an init property — and in strict mode,
init properties are also not allowed to be repeated.
if (prop.key.type === "Identifier" && (strict || sawGetSet)) {
for (var i = 0; i < node.properties.length; ++i) {
@@ -1179,7 +1215,7 @@ init properties are also not allowed to be repeated.
}
Parse a function declaration or literal (depending on the
isStatement parameter).
function parseFunction(node, isStatement) {
if (tokType === _name) node.id = parseIdent();
else if (isStatement) unexpected();
@@ -1190,11 +1226,11 @@ init properties are also not allowed to be repeated.
}
Start a new scope with regard to labels and the inFunction
flag (restore them to their old value afterwards).
var oldInFunc = inFunction, oldLabels = labels;
inFunction = true; labels = [];
node.body = parseBlock(true);
inFunction = oldInFunc; labels = oldLabels;
If this is a strict mode function, verify that argument names
are not repeated, and it does not try to bind the words eval
or arguments.
if (strict || node.body.body.length && isUseStrict(node.body.body[0])) {
for (var i = node.id ? -1 : 0; i < node.params.length; ++i) {
@@ -1207,7 +1243,7 @@ or arguments.
}
Parses a comma-separated list of expressions, and returns them as
an array. close is the token type that ends the list, and
allowEmpty can be turned on to allow subsequent commas with
nothing in between them to be parsed as null (which is needed
@@ -1223,7 +1259,7 @@ for array literals).
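For instance (a behavioural sketch, not part of the patch): holes in an array literal show up as null entries in the elements list.

    var arr = acorn.parse("[1, , 3]").body[0].expression;
    console.log(arr.elements.length);   // 3
    console.log(arr.elements[1]);       // null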
}
Parse the next token as an identifier. If liberal is true (used
when parsing properties), it will also convert keywords into
identifiers.
function parseIdent(liberal) {
var node = startNode();