parser: limit maximum number of tokens
Motivation: Parser CPU and memory usage is linear in the number of tokens in a
document; however, in extreme cases it becomes quadratic due to memory exhaustion.
On my machine this happens on queries with 2k tokens.
For example:
```
{ a a <repeat 2k times> a }
```
It takes 741ms on my machine.
But if we create a document of the same size but with a smaller number of
tokens, parsing is a lot faster.
Example:
```
{ a(arg: "a <repeat 2k times> a") }
```
Now it takes only 17ms to process, which is about 43 times faster.
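
A minimal sketch to reproduce the comparison (not part of this change; it
assumes Node 16+ for the global `performance`, and exact timings will vary
by machine):
```
import { parse } from 'graphql';

const manyTokens = `{ ${'a '.repeat(2000)}}`; // ~2k name tokens
const fewTokens = `{ a(arg: "${'a '.repeat(2000)}") }`; // same size, 8 tokens

for (const [label, query] of Object.entries({ manyTokens, fewTokens })) {
  const start = performance.now();
  parse(query);
  console.log(`${label}: ${(performance.now() - start).toFixed(1)}ms`);
}
```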

That means that if we were to limit document size, the limit would have to be
very small, since it takes only two bytes to create a token, e.g. ` a`.
But that would hurt legitimate documents that contain long tokens
(comments, descriptions, strings, long names, etc.).

That's why this PR adds a mechanism to limit the number of tokens in a
parsed document.
The exact same mechanism is implemented in graphql-java, see:
graphql-java/graphql-java#2549

I also tried an alternative approach of counting AST nodes, which gives a
slightly better approximation of how many resources will be consumed.
However, unlike tokens, AST nodes are an implementation detail of graphql-js,
so the count is impossible to replicate in other implementations (e.g. to
compute on a client).
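
As an illustration, a sketch of how a caller could opt in to the new option;
the `parseWithLimit` wrapper and the 10k threshold are made-up examples, not
part of this change:
```
import { parse, GraphQLError } from 'graphql';

function parseWithLimit(source: string) {
  try {
    // Abort parsing once the lexer emits more than 10k non-EOF tokens.
    return parse(source, { maxTokens: 10_000 });
  } catch (error) {
    if (error instanceof GraphQLError) {
      // e.g. 'Syntax Error: Document contains more than 10000 tokens. Parsing aborted.'
      return null;
    }
    throw error;
  }
}
```
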
IvanGoncharov committed Jul 28, 2022
1 parent 5aadd61 commit e04ce2e
Showing 2 changed files with 50 additions and 8 deletions.
15 changes: 15 additions & 0 deletions src/language/__tests__/parser-test.ts
@@ -88,6 +88,21 @@ describe('Parser', () => {
`);
});

it('limit maximum number of tokens', () => {
expect(() => parse('{ foo }', { maxTokens: 3 })).to.not.throw();
expect(() => parse('{ foo }', { maxTokens: 2 })).to.throw(
'Syntax Error: Document contains more than 2 tokens. Parsing aborted.',
);

expect(() =>
parse('{ foo(bar: "baz") }', { maxTokens: 8 }),
).to.not.throw();

expect(() => parse('{ foo(bar: "baz") }', { maxTokens: 7 })).to.throw(
'Syntax Error: Document contains more than 7 tokens. Parsing aborted.',
);
});

it('parses variable inline values', () => {
expect(() =>
parse('{ field(complex: { a: { b: [ $var ] } }) }'),
43 changes: 35 additions & 8 deletions src/language/parser.ts
@@ -82,6 +82,15 @@ export interface ParseOptions {
*/
noLocation?: boolean | undefined;

/**
* Parser CPU and memory usage is linear in the number of tokens in a document,
* however in extreme cases it becomes quadratic due to memory exhaustion.
* Parsing happens before validation, so even invalid queries can burn lots of
* CPU time and memory.
* To prevent this you can set a limit on the maximum number of tokens.
*/
maxTokens?: number | undefined;

/**
* @deprecated will be removed in the v17.0.0
*
@@ -206,12 +215,14 @@ export function parseType(
export class Parser {
protected _options: ParseOptions;
protected _lexer: Lexer;
protected _tokenCounter: number;

constructor(source: string | Source, options: ParseOptions = {}) {
const sourceObj = isSource(source) ? source : new Source(source);

this._lexer = new Lexer(sourceObj);
this._options = options;
this._tokenCounter = 0;
}

/**
@@ -634,13 +645,13 @@ export class Parser {
case TokenKind.BRACE_L:
return this.parseObject(isConst);
case TokenKind.INT:
this._lexer.advance();
this.advanceLexer();
return this.node<IntValueNode>(token, {
kind: Kind.INT,
value: token.value,
});
case TokenKind.FLOAT:
this._lexer.advance();
this.advanceLexer();
return this.node<FloatValueNode>(token, {
kind: Kind.FLOAT,
value: token.value,
@@ -649,7 +660,7 @@
case TokenKind.BLOCK_STRING:
return this.parseStringLiteral();
case TokenKind.NAME:
this._lexer.advance();
this.advanceLexer();
switch (token.value) {
case 'true':
return this.node<BooleanValueNode>(token, {
@@ -695,7 +706,7 @@

parseStringLiteral(): StringValueNode {
const token = this._lexer.token;
this._lexer.advance();
this.advanceLexer();
return this.node<StringValueNode>(token, {
kind: Kind.STRING,
value: token.value,
@@ -1479,7 +1490,7 @@
expectToken(kind: TokenKind): Token {
const token = this._lexer.token;
if (token.kind === kind) {
this._lexer.advance();
this.advanceLexer();
return token;
}

@@ -1497,7 +1508,7 @@
expectOptionalToken(kind: TokenKind): boolean {
const token = this._lexer.token;
if (token.kind === kind) {
this._lexer.advance();
this.advanceLexer();
return true;
}
return false;
@@ -1510,7 +1521,7 @@
expectKeyword(value: string): void {
const token = this._lexer.token;
if (token.kind === TokenKind.NAME && token.value === value) {
this._lexer.advance();
this.advanceLexer();
} else {
throw syntaxError(
this._lexer.source,
@@ -1527,7 +1538,7 @@
expectOptionalKeyword(value: string): boolean {
const token = this._lexer.token;
if (token.kind === TokenKind.NAME && token.value === value) {
this._lexer.advance();
this.advanceLexer();
return true;
}
return false;
@@ -1616,6 +1627,22 @@
} while (this.expectOptionalToken(delimiterKind));
return nodes;
}

advanceLexer(): void {
const { maxTokens } = this._options;
const token = this._lexer.advance();

if (maxTokens !== undefined && token.kind !== TokenKind.EOF) {
++this._tokenCounter;
if (this._tokenCounter > maxTokens) {
throw syntaxError(
this._lexer.source,
token.start,
`Document contains more than ${maxTokens} tokens. Parsing aborted.`,
);
}
}
}
}

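One detail of `advanceLexer` worth calling out: the EOF token is excluded from
the count, so the limit applies only to meaningful tokens. A small
illustration, using the same values as the new tests:
```
import { parse } from 'graphql';

// '{ foo }' lexes to three non-EOF tokens: '{', 'foo' and '}'.
parse('{ foo }', { maxTokens: 3 }); // parses fine

try {
  parse('{ foo }', { maxTokens: 2 });
} catch (error) {
  // 'Syntax Error: Document contains more than 2 tokens. Parsing aborted.'
  console.log(String(error));
}
```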
