Port babel-parser changes from 2022-01-10 to 2022-02-26 #716

Merged (3 commits), Jul 7, 2022
1 change: 1 addition & 0 deletions generator/generateReadWordTree.ts
@@ -42,6 +42,7 @@ const KEYWORDS = [

const CONTEXTUAL_KEYWORDS = [
"abstract",
"accessor",
"as",
"asserts",
"async",
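For context, a minimal sketch (not part of this PR) of the syntax the new `accessor` contextual keyword comes from: auto-accessor class fields in the ECMAScript decorators proposal. The class and field names below are invented for illustration.

```ts
// Hypothetical illustration of the `accessor` contextual keyword the parser
// must now recognize (auto-accessor fields from the decorators proposal).
class Counter {
  accessor count = 0; // roughly sugar for a private backing field plus get/set
}

// Because it is only contextual, `accessor` remains a legal identifier:
const accessor = "still a valid name";
```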
6 changes: 5 additions & 1 deletion generator/generateTokenTypes.ts
@@ -97,7 +97,8 @@ const types = {
lessThan: new BinopTokenType("<", 7),
greaterThan: new BinopTokenType(">", 7),
relationalOrEqual: new BinopTokenType("<=/>=", 7),
- bitShift: new BinopTokenType("<</>>", 8),
+ bitShiftL: new BinopTokenType("<<", 8),
+ bitShiftR: new BinopTokenType(">>/>>>", 8),
plus: new TokenType("+", {binop: 9, prefix}),
minus: new TokenType("-", {binop: 9, prefix}),
modulo: new BinopTokenType("%", 10),
@@ -173,6 +174,9 @@ const types = {

export default function generateTokenTypes(): string {
let code = '// Generated file, do not edit! Run "yarn generate" to re-generate this file.\n';
+ // formatTokenType is trivial and used for debugging purposes, so we shouldn't
+ // need full test coverage.
+ code += "/* istanbul ignore file */\n";
code += generateTokenTypeEnum();
code += generateFormatTokenType();
return code;
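To make the token split concrete, here is an assumed mapping (based on the token descriptions above and the tokenizer changes further down) from source operators to the new token types; the variable names are invented for illustration.

```ts
// Which token each operator should now produce (per the types above):
const a = 1 << 2;   // "<<"  -> tt.bitShiftL
const b = 8 >> 1;   // ">>"  -> tt.bitShiftR
const c = -8 >>> 1; // ">>>" -> tt.bitShiftR
let d = 1;
d <<= 2;            // "<<=" -> tt.assign (see readToken_lt further down)
d >>= 1;            // ">>=" -> tt.assign
```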
40 changes: 28 additions & 12 deletions src/parser/plugins/typescript.ts
@@ -1,5 +1,6 @@
import {
eat,
+ finishToken,
IdentifierRole,
lookaheadType,
lookaheadTypeAndKeyword,
@@ -1095,6 +1096,25 @@ function tsTryParseGenericAsyncArrowFunction(): boolean {
return true;
}

+ /**
+ * If necessary, hack the tokenizer state so that this bitshift was actually a
+ * less-than token, then keep parsing. This should only be used in situations
+ * where we restore from snapshot on error (which reverts this change) or
+ * where bitshift would be illegal anyway (e.g. in a class "extends" clause).
+ *
+ * This hack is useful to handle situations like foo<<T>() => void>() where
+ * there can legitimately be two open-angle-brackets in a row in TS. This
+ * situation is very obscure and (as of this writing) is handled by Babel but
+ * not TypeScript itself, so it may be fine in the future to remove this case.
+ */
+ function tsParseTypeArgumentsWithPossibleBitshift(): void {
+ if (state.type === tt.bitShiftL) {
+ state.pos -= 1;
+ finishToken(tt.lessThan);
+ }
+ tsParseTypeArguments();
+ }

function tsParseTypeArguments(): void {
const oldIsType = pushTypeContext(0);
expect(tt.lessThan);
@@ -1165,7 +1185,7 @@ export function tsParseSubscript(
return;
}

- if (match(tt.lessThan)) {
+ if (match(tt.lessThan) || match(tt.bitShiftL)) {
// There are a number of things we are going to "maybe" parse, like type arguments on
// tagged template expressions. If any of them fail, walk it back and continue.
const snapshot = state.snapshot();
@@ -1178,7 +1198,7 @@ export function tsParseSubscript(
return;
}
}
- tsParseTypeArguments();
+ tsParseTypeArgumentsWithPossibleBitshift();
if (!noCalls && eat(tt.parenL)) {
// With f<T>(), the subscriptStartIndex marker is on the ( token.
state.tokens[state.tokens.length - 1].subscriptStartIndex = startTokenIndex;
@@ -1210,15 +1230,11 @@ export function tsParseSubscript(
}

export function tsStartParseNewArguments(): void {
- if (match(tt.lessThan)) {
+ if (match(tt.lessThan) || match(tt.bitShiftL)) {
// 99% certain this is `new C<T>();`. But may be `new C < T;`, which is also legal.
const snapshot = state.snapshot();

- state.type = tt.typeParameterStart;
- tsParseTypeArguments();
- if (!match(tt.parenL)) {
- unexpected();
- }
+ tsParseTypeArgumentsWithPossibleBitshift();

if (state.error) {
state.restoreFromSnapshot(snapshot);
@@ -1431,8 +1447,8 @@ export function tsParseExportDeclaration(): void {
}

export function tsAfterParseClassSuper(hasSuper: boolean): void {
- if (hasSuper && match(tt.lessThan)) {
- tsParseTypeArguments();
+ if (hasSuper && (match(tt.lessThan) || match(tt.bitShiftL))) {
+ tsParseTypeArgumentsWithPossibleBitshift();
}
if (eatContextual(ContextualKeyword._implements)) {
state.tokens[state.tokens.length - 1].type = tt._implements;
@@ -1553,8 +1569,8 @@ export function tsParseAssignableListItemTypes(): void {
}

export function tsParseMaybeDecoratorArguments(): void {
- if (match(tt.lessThan)) {
- tsParseTypeArguments();
+ if (match(tt.lessThan) || match(tt.bitShiftL)) {
+ tsParseTypeArgumentsWithPossibleBitshift();
}
baseParseMaybeDecoratorArguments();
}
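As a usage-level illustration of the case tsParseTypeArgumentsWithPossibleBitshift handles, consider code like the example from the comment above. The function and class names here are invented, and per that comment this form is accepted by Babel but (as of the PR) not by TypeScript itself, so treat it as a parser test case rather than recommended style.

```ts
declare function foo<F>(cb: F): void;

// "<<" is tokenized greedily as bitShiftL, then re-finished as a single "<"
// once it turns out to start type arguments:
foo<<T>() => void>(() => {});

// The same retokenization path covers `extends` clauses, decorator arguments,
// and `new C<...>()`, positions where a real bit shift could not appear anyway.
class Base<T> {}
class Derived extends Base<<T>() => T> {}
```

In the call and `new` positions, the parser takes a snapshot first and restores it on error, so re-finishing `<<` as `<` is safe to attempt speculatively.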
44 changes: 33 additions & 11 deletions src/parser/tokenizer/index.ts
@@ -482,15 +482,36 @@ function readToken_plus_min(code: number): void {
}
}

- // '<>'
- function readToken_lt_gt(code: number): void {
+ function readToken_lt(): void {
const nextChar = input.charCodeAt(state.pos + 1);

+ if (nextChar === charCodes.lessThan) {
+ if (input.charCodeAt(state.pos + 2) === charCodes.equalsTo) {
+ finishOp(tt.assign, 3);
+ return;
+ }
+ // This still might be two instances of <, e.g. the TS type argument
+ // expression f<<T>() => void>() , but parse as left shift for now and we'll
+ // retokenize if necessary. We can't use isType for this case because we
+ // don't know yet if we're in a type.
+ finishOp(tt.bitShiftL, 2);
+ return;
+ }
+
+ if (nextChar === charCodes.equalsTo) {
+ // <=
+ finishOp(tt.relationalOrEqual, 2);
+ } else {
+ finishOp(tt.lessThan, 1);
+ }
+ }
+
+ function readToken_gt(): void {
+ const code = charCodes.greaterThan;
+ const nextChar = input.charCodeAt(state.pos + 1);

if (nextChar === code) {
- const size =
- code === charCodes.greaterThan && input.charCodeAt(state.pos + 2) === charCodes.greaterThan
- ? 3
- : 2;
+ const size = input.charCodeAt(state.pos + 2) === charCodes.greaterThan ? 3 : 2;
if (input.charCodeAt(state.pos + size) === charCodes.equalsTo) {
finishOp(tt.assign, size + 1);
return;
@@ -500,15 +521,13 @@ function readToken_lt_gt(code: number): void {
finishOp(tt.greaterThan, 1);
return;
}
- finishOp(tt.bitShift, size);
+ finishOp(tt.bitShiftR, size);
return;
}

if (nextChar === charCodes.equalsTo) {
- // <= | >=
+ // >=
finishOp(tt.relationalOrEqual, 2);
- } else if (code === charCodes.lessThan) {
- finishOp(tt.lessThan, 1);
} else {
finishOp(tt.greaterThan, 1);
}
@@ -695,8 +714,11 @@ export function getTokenFromCode(code: number): void {
return;

case charCodes.lessThan:
+ readToken_lt();
+ return;
+
case charCodes.greaterThan:
- readToken_lt_gt(code);
+ readToken_gt();
return;

case charCodes.equalsTo:
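A minimal, self-contained sketch (not sucrase's actual API) of the dispatch logic readToken_lt implements above; the function name and return shape are assumptions, kept only to show which token each "<"-prefixed operator becomes.

```ts
type LtTokenType = "assign" | "bitShiftL" | "relationalOrEqual" | "lessThan";

// Mirror of readToken_lt's decision tree for a "<" found at `pos`.
function classifyLessThan(input: string, pos: number): {type: LtTokenType; width: number} {
  const next = input.charCodeAt(pos + 1);
  if (next === 0x3c /* "<" */) {
    if (input.charCodeAt(pos + 2) === 0x3d /* "=" */) {
      return {type: "assign", width: 3}; // "<<="
    }
    // "<<" is treated as a left shift for now; the TypeScript plugin may later
    // back up one character and re-finish it as a plain "<" (see typescript.ts).
    return {type: "bitShiftL", width: 2};
  }
  if (next === 0x3d /* "=" */) {
    return {type: "relationalOrEqual", width: 2}; // "<="
  }
  return {type: "lessThan", width: 1};
}

// classifyLessThan("x <<= 1", 2) -> {type: "assign", width: 3}
// classifyLessThan("f<<T>()", 1) -> {type: "bitShiftL", width: 2}
```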
1 change: 1 addition & 0 deletions src/parser/tokenizer/keywords.ts
@@ -1,6 +1,7 @@
export enum ContextualKeyword {
NONE,
_abstract,
+ _accessor,
_as,
_asserts,
_async,