fix(core): parse incorrect ML open tag as text (#29328)
This PR aligns the markup language lexer with the previous behaviour in version 7.x:
https://stackblitz.com/edit/angular-iancj2

While this behaviour is not perfect (we should be giving users an error message
here about invalid HTML instead of assuming a text node), this is probably the
best we can do without a more substantial rewrite of the lexing / parsing
infrastructure.

This PR just fixes #29231 and restores the VE behaviour; a more elaborate fix
will be done in a separate PR, as it requires non-trivial rewrites.

PR Close #29328
pkozlowski-opensource authored and matsko committed Mar 19, 2019
1 parent c0ad9e1 commit 4605df8
Showing 2 changed files with 45 additions and 3 deletions.
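
For orientation before the diff: the lexer optimistically consumes what looks like an open tag and, when anything inside it turns out to be invalid, rewinds and re-emits the consumed characters as a plain text token. The snippet below is a minimal, self-contained TypeScript sketch of that checkpoint-and-rollback idea; the names (SketchToken, consumeTagOpen) and the simplistic attribute scan are illustrative only and do not mirror the real _Tokenizer implementation.

// Minimal standalone sketch (not Angular source) of the checkpoint-and-rollback
// strategy: speculatively tokenize an open tag and, on any failure, truncate the
// token list back to the checkpoint and emit the raw text instead.
type SketchToken = {type: 'TEXT' | 'TAG_OPEN_START' | 'ATTR_NAME', value: string};

function consumeTagOpen(input: string, tokens: SketchToken[]): void {
  const checkpoint = tokens.length;  // tokens emitted before we start speculating
  try {
    tokens.push({type: 'TAG_OPEN_START', value: input.slice(1, 2)});
    // Walk the would-be attributes; a quote where a name should start is invalid.
    for (const part of input.slice(2, -1).trim().split(/\s+/).filter(p => p.length > 0)) {
      if (part.startsWith('"') || part.startsWith('\'')) {
        throw new Error('unexpected quote in attribute-name position');
      }
      tokens.push({type: 'ATTR_NAME', value: part.split('=')[0]});
    }
  } catch {
    tokens.length = checkpoint;                 // roll back every speculative token
    tokens.push({type: 'TEXT', value: input});  // assume the author meant literal text
  }
}

// '<t a="b" ">' fails after one valid attribute, so everything collapses to TEXT.
const tokens: SketchToken[] = [];
consumeTagOpen('<t a="b" ">', tokens);
console.log(tokens);  // [{ type: 'TEXT', value: '<t a="b" ">' }]
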
10 changes: 8 additions & 2 deletions packages/compiler/src/ml_parser/lexer.ts
@@ -461,12 +461,14 @@ class _Tokenizer {
     let tagName: string;
     let prefix: string;
     let openTagToken: Token|undefined;
+    let tokensBeforeTagOpen = this.tokens.length;
     const innerStart = this._cursor.clone();
     try {
       if (!chars.isAsciiLetter(this._cursor.peek())) {
         throw this._createError(
             _unexpectedCharacterErrorMsg(this._cursor.peek()), this._cursor.getSpan(start));
       }
+
       openTagToken = this._consumeTagOpenStart(start);
       prefix = openTagToken.parts[0];
       tagName = openTagToken.parts[1];
@@ -483,10 +485,10 @@
       this._consumeTagOpenEnd();
     } catch (e) {
       if (e instanceof _ControlFlowError) {
-        // When the start tag is invalid, assume we want a "<"
+        // When the start tag is invalid (including invalid "attributes"), assume we want a "<"
         this._cursor = innerStart;
         if (openTagToken) {
-          this.tokens.pop();
+          this.tokens.length = tokensBeforeTagOpen;
         }
         // Back to back text tokens are merged at the end
         this._beginToken(TokenType.TEXT, start);
@@ -528,6 +530,10 @@
   }
 
   private _consumeAttributeName() {
+    const attrNameStart = this._cursor.peek();
+    if (attrNameStart === chars.$SQ || attrNameStart === chars.$DQ) {
+      throw this._createError(_unexpectedCharacterErrorMsg(attrNameStart), this._cursor.getSpan());
+    }
     this._beginToken(TokenType.ATTR_NAME);
     const prefixAndName = this._consumePrefixAndName();
     this._endToken(prefixAndName);
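
The change from this.tokens.pop() to this.tokens.length = tokensBeforeTagOpen is the crux of the fix: now that _consumeAttributeName throws when it sees a quote where an attribute name should start, the failure can happen after the open-tag token and one or more valid attribute tokens have already been pushed, so removing just the most recent token is not enough. A tiny standalone TypeScript illustration of truncating back to a checkpoint (not Angular code):

// Assigning to .length removes everything pushed after the checkpoint,
// whereas .pop() would only discard the single most recent element.
const tokens: string[] = ['TEXT'];
const checkpoint = tokens.length;                          // 1
tokens.push('TAG_OPEN_START', 'ATTR_NAME', 'ATTR_VALUE');  // speculative tokens
tokens.length = checkpoint;                                // rolls back all three at once
console.log(tokens);                                       // ['TEXT']
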
38 changes: 37 additions & 1 deletion packages/compiler/test/ml_parser/lexer_spec.ts
@@ -7,7 +7,6 @@
  */
 
 import {getHtmlTagDefinition} from '../../src/ml_parser/html_tags';
-import {InterpolationConfig} from '../../src/ml_parser/interpolation_config';
 import * as lex from '../../src/ml_parser/lexer';
 import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';
 
@@ -378,6 +377,18 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';
         ]);
       });
 
+      it('should report missing closing single quote', () => {
+        expect(tokenizeAndHumanizeErrors('<t a=\'b>')).toEqual([
+          [lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
+        ]);
+      });
+
+      it('should report missing closing double quote', () => {
+        expect(tokenizeAndHumanizeErrors('<t a="b>')).toEqual([
+          [lex.TokenType.ATTR_VALUE, 'Unexpected character "EOF"', '0:8'],
+        ]);
+      });
+
     });
 
     describe('closing tags', () => {
@@ -552,6 +563,31 @@ import {ParseLocation, ParseSourceFile, ParseSourceSpan} from '../../src/parse_util';
         ]);
       });
 
+      it('should parse start tags quotes in place of an attribute name as text', () => {
+        expect(tokenizeAndHumanizeParts('<t ">')).toEqual([
+          [lex.TokenType.TEXT, '<t ">'],
+          [lex.TokenType.EOF],
+        ]);
+
+        expect(tokenizeAndHumanizeParts('<t \'>')).toEqual([
+          [lex.TokenType.TEXT, '<t \'>'],
+          [lex.TokenType.EOF],
+        ]);
+      });
+
+      it('should parse start tags quotes in place of an attribute name (after a valid attribute) as text',
+         () => {
+           expect(tokenizeAndHumanizeParts('<t a="b" ">')).toEqual([
+             [lex.TokenType.TEXT, '<t a="b" ">'],
+             [lex.TokenType.EOF],
+           ]);
+
+           expect(tokenizeAndHumanizeParts('<t a=\'b\' \'>')).toEqual([
+             [lex.TokenType.TEXT, '<t a=\'b\' \'>'],
+             [lex.TokenType.EOF],
+           ]);
+         });
+
       it('should be able to escape {', () => {
         expect(tokenizeAndHumanizeParts('{{ "{" }}')).toEqual([
           [lex.TokenType.TEXT, '{{ "{" }}'],

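As a user-facing illustration of the restored behaviour (a hypothetical component, not taken from the PR or from the #29231 report): a stray quote inside what looks like an open tag is now tokenized as literal text rather than producing a lexer error, as it was under View Engine in 7.x.

import {Component} from '@angular/core';

// Hypothetical example: the bare `"` after a="b" is not a valid attribute name,
// so the lexer falls back to a single text node for the '<t a="b" ">' fragment
// instead of reporting a hard error, matching the 7.x / View Engine behaviour.
@Component({
  selector: 'stray-quote-demo',
  template: `<t a="b" ">this whole line is treated as plain text`,
})
export class StrayQuoteDemoComponent {}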