Commit e52f418: Merge 0108b80 into 01b5cec
Paul Lancaster committed Sep 26, 2020
2 parents: 01b5cec + 0108b80

Showing 10 changed files with 108 additions and 27 deletions.
14 changes: 12 additions & 2 deletions boa/src/syntax/lexer/comment.rs
@@ -23,7 +23,12 @@ use std::io::Read;
 pub(super) struct SingleLineComment;
 
 impl<R> Tokenizer<R> for SingleLineComment {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {
@@ -58,7 +63,12 @@ impl<R> Tokenizer<R> for SingleLineComment {
 pub(super) struct MultiLineComment;
 
 impl<R> Tokenizer<R> for MultiLineComment {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {

31 changes: 30 additions & 1 deletion boa/src/syntax/lexer/identifier.rs
@@ -10,6 +10,20 @@ use crate::{
 };
 use std::io::Read;
 
+const STRICT_FORBIDDEN_IDENTIFIERS: [&str; 11] = [
+    "eval",
+    "arguments",
+    "implements",
+    "interface",
+    "let",
+    "package",
+    "private",
+    "protected",
+    "public",
+    "static",
+    "yield",
+];
+
 /// Identifier lexing.
 ///
 /// More information:
@@ -31,7 +45,12 @@ impl Identifier {
 }
 
 impl<R> Tokenizer<R> for Identifier {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {
@@ -51,6 +70,16 @@ impl<R> Tokenizer<R> for Identifier {
             if let Ok(keyword) = slice.parse() {
                 TokenKind::Keyword(keyword)
             } else {
+                if strict_mode && STRICT_FORBIDDEN_IDENTIFIERS.contains(&slice) {
+                    return Err(Error::Syntax(
+                        format!(
+                            "using future reserved keyword '{}' not allowed in strict mode",
+                            slice
+                        )
+                        .into(),
+                        start_pos,
+                    ));
+                }
                 TokenKind::identifier(slice)
             }
         }

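The check added here is the heart of this file's change: in strict mode, `eval`, `arguments`, and ECMA-262's future reserved words may not be used as identifiers. A minimal, self-contained sketch of the logic (plain `String` errors stand in for boa's `Error::Syntax` and `Position` types):

    const STRICT_FORBIDDEN_IDENTIFIERS: [&str; 11] = [
        "eval", "arguments", "implements", "interface", "let", "package",
        "private", "protected", "public", "static", "yield",
    ];

    // Mirrors the branch added above: identifiers that parse as keywords are
    // handled earlier; anything else is rejected if it is reserved in strict mode.
    fn check_identifier(slice: &str, strict_mode: bool) -> Result<(), String> {
        if strict_mode && STRICT_FORBIDDEN_IDENTIFIERS.contains(&slice) {
            return Err(format!(
                "using future reserved keyword '{}' not allowed in strict mode",
                slice
            ));
        }
        Ok(())
    }

    fn main() {
        assert!(check_identifier("interface", true).is_err()); // strict mode: error
        assert!(check_identifier("interface", false).is_ok()); // sloppy mode: fine
    }
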
33 changes: 21 additions & 12 deletions boa/src/syntax/lexer/mod.rs
@@ -48,7 +48,12 @@ pub use token::{Token, TokenKind};
 
 trait Tokenizer<R> {
     /// Lexes the next token.
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read;
 }
@@ -109,7 +114,11 @@ impl<R> Lexer<R> {
     // that means it could be multiple different tokens depending on the input token.
     //
     // As per https://tc39.es/ecma262/#sec-ecmascript-language-lexical-grammar
-    pub(crate) fn lex_slash_token(&mut self, start: Position) -> Result<Token, Error>
+    pub(crate) fn lex_slash_token(
+        &mut self,
+        start: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {
@@ -119,11 +128,11 @@ impl<R> Lexer<R> {
         match c {
             '/' => {
                 self.cursor.next_char()?.expect("/ token vanished"); // Consume the '/'
-                SingleLineComment.lex(&mut self.cursor, start)
+                SingleLineComment.lex(&mut self.cursor, start, strict_mode)
             }
             '*' => {
                 self.cursor.next_char()?.expect("* token vanished"); // Consume the '*'
-                MultiLineComment.lex(&mut self.cursor, start)
+                MultiLineComment.lex(&mut self.cursor, start, strict_mode)
             }
             ch => {
                 match self.get_goal() {
@@ -146,7 +155,7 @@ impl<R> Lexer<R> {
                     }
                     InputElement::RegExp | InputElement::RegExpOrTemplateTail => {
                         // Can be a regular expression.
-                        RegexLiteral.lex(&mut self.cursor, start)
+                        RegexLiteral.lex(&mut self.cursor, start, strict_mode)
                     }
                 }
             }
@@ -188,13 +197,13 @@ impl<R> Lexer<R> {
                 TokenKind::LineTerminator,
                 Span::new(start, self.cursor.pos()),
             )),
-            '"' | '\'' => StringLiteral::new(next_chr).lex(&mut self.cursor, start),
-            '`' => TemplateLiteral.lex(&mut self.cursor, start),
+            '"' | '\'' => StringLiteral::new(next_chr).lex(&mut self.cursor, start, strict_mode),
+            '`' => TemplateLiteral.lex(&mut self.cursor, start, strict_mode),
             _ if next_chr.is_digit(10) => {
-                NumberLiteral::new(next_chr, strict_mode).lex(&mut self.cursor, start)
+                NumberLiteral::new(next_chr).lex(&mut self.cursor, start, strict_mode)
             }
             _ if next_chr.is_alphabetic() || next_chr == '$' || next_chr == '_' => {
-                Identifier::new(next_chr).lex(&mut self.cursor, start)
+                Identifier::new(next_chr).lex(&mut self.cursor, start, strict_mode)
            }
             ';' => Ok(Token::new(
                 Punctuator::Semicolon.into(),
@@ -204,7 +213,7 @@ impl<R> Lexer<R> {
                 Punctuator::Colon.into(),
                 Span::new(start, self.cursor.pos()),
             )),
-            '.' => SpreadLiteral::new().lex(&mut self.cursor, start),
+            '.' => SpreadLiteral::new().lex(&mut self.cursor, start, strict_mode),
             '(' => Ok(Token::new(
                 Punctuator::OpenParen.into(),
                 Span::new(start, self.cursor.pos()),
@@ -237,9 +246,9 @@ impl<R> Lexer<R> {
                 Punctuator::Question.into(),
                 Span::new(start, self.cursor.pos()),
             )),
-            '/' => self.lex_slash_token(start),
+            '/' => self.lex_slash_token(start, strict_mode),
             '=' | '*' | '+' | '-' | '%' | '|' | '&' | '^' | '<' | '>' | '!' | '~' => {
-                Operator::new(next_chr).lex(&mut self.cursor, start)
+                Operator::new(next_chr).lex(&mut self.cursor, start, strict_mode)
             }
             _ => {
                 let details = format!(

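As the comment above `lex_slash_token` notes, a leading `/` is ambiguous: it may open a comment, be a division-style operator, or begin a regex literal, and the resolution depends on the next character plus the lexer's current goal symbol. A toy classifier sketching that decision (the real method consumes from the cursor and delegates to `SingleLineComment`, `MultiLineComment`, `RegexLiteral`, or the operator path; `RegExpOrTemplateTail` is folded into the regex case here):

    // Toy sketch of the lex_slash_token dispatch, with labels in place of tokenizers.
    #[derive(Clone, Copy)]
    enum InputElement {
        Div,
        RegExp, // stands in for both RegExp and RegExpOrTemplateTail
    }

    fn classify_slash(next: Option<char>, goal: InputElement) -> &'static str {
        match next {
            Some('/') => "single-line comment",
            Some('*') => "multi-line comment",
            _ => match goal {
                InputElement::Div => "division operator (or /=)",
                InputElement::RegExp => "regex literal",
            },
        }
    }

    fn main() {
        assert_eq!(classify_slash(Some('/'), InputElement::Div), "single-line comment");
        assert_eq!(classify_slash(Some('x'), InputElement::RegExp), "regex literal");
    }
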
16 changes: 10 additions & 6 deletions boa/src/syntax/lexer/number.rs
@@ -24,13 +24,12 @@ use std::{io::Read, str::FromStr};
 #[derive(Debug, Clone, Copy)]
 pub(super) struct NumberLiteral {
     init: char,
-    strict_mode: bool,
 }
 
 impl NumberLiteral {
     /// Creates a new string literal lexer.
-    pub(super) fn new(init: char, strict_mode: bool) -> Self {
-        Self { init, strict_mode }
+    pub(super) fn new(init: char) -> Self {
+        Self { init }
     }
 }
 
@@ -135,7 +134,12 @@ where
 }
 
 impl<R> Tokenizer<R> for NumberLiteral {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {
@@ -187,7 +191,7 @@ impl<R> Tokenizer<R> for NumberLiteral {
                 ch => {
                     if ch.is_digit(8) {
                         // LegacyOctalIntegerLiteral
-                        if self.strict_mode {
+                        if strict_mode {
                             // LegacyOctalIntegerLiteral is forbidden with strict mode true.
                             return Err(Error::syntax(
                                 "implicit octal literals are not allowed in strict mode",
@@ -205,7 +209,7 @@ impl<R> Tokenizer<R> for NumberLiteral {
                         // Indicates a numerical digit comes after then 0 but it isn't an octal digit
                         // so therefore this must be a number with an unneeded leading 0. This is
                         // forbidden in strict mode.
-                        if self.strict_mode {
+                        if strict_mode {
                             return Err(Error::syntax(
                                 "leading 0's are not allowed in strict mode",
                                 start_pos,

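Concrete inputs make the two branches clear: `010` is a LegacyOctalIntegerLiteral (value 8) and becomes an error only in strict mode, while `08` cannot be octal, so its leading zero is merely useless and is likewise rejected in strict mode. A stand-alone sketch of just these checks (plain `&str` errors stand in for boa's `Error::syntax`, and the cursor is elided):

    // Given the digit that follows a leading '0', apply the strict-mode rules above.
    fn check_after_leading_zero(ch: char, strict_mode: bool) -> Result<(), &'static str> {
        if ch.is_digit(8) {
            // LegacyOctalIntegerLiteral, e.g. `010`
            if strict_mode {
                return Err("implicit octal literals are not allowed in strict mode");
            }
        } else if ch.is_digit(10) {
            // An 8 or 9 after the 0, e.g. `08`: a decimal with a useless leading zero
            if strict_mode {
                return Err("leading 0's are not allowed in strict mode");
            }
        }
        Ok(())
    }

    fn main() {
        assert!(check_after_leading_zero('1', false).is_ok()); // `010`, sloppy mode
        assert!(check_after_leading_zero('1', true).is_err()); // `010`, strict mode
        assert!(check_after_leading_zero('8', true).is_err()); // `08`, strict mode
    }
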
7 changes: 6 additions & 1 deletion boa/src/syntax/lexer/operator.rs
@@ -93,7 +93,12 @@ impl Operator {
 }
 
 impl<R> Tokenizer<R> for Operator {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {

7 changes: 6 additions & 1 deletion boa/src/syntax/lexer/regex.rs
@@ -33,7 +33,12 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
 pub(super) struct RegexLiteral;
 
 impl<R> Tokenizer<R> for RegexLiteral {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {

7 changes: 6 additions & 1 deletion boa/src/syntax/lexer/spread.rs
@@ -31,7 +31,12 @@ impl SpreadLiteral {
 }
 
 impl<R> Tokenizer<R> for SpreadLiteral {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {

7 changes: 6 additions & 1 deletion boa/src/syntax/lexer/string.rs
@@ -51,7 +51,12 @@ enum StringTerminator {
 }
 
 impl<R> Tokenizer<R> for StringLiteral {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {

7 changes: 6 additions & 1 deletion boa/src/syntax/lexer/template.rs
@@ -24,7 +24,12 @@ use std::io::{self, ErrorKind, Read};
 pub(super) struct TemplateLiteral;
 
 impl<R> Tokenizer<R> for TemplateLiteral {
-    fn lex(&mut self, cursor: &mut Cursor<R>, start_pos: Position) -> Result<Token, Error>
+    fn lex(
+        &mut self,
+        cursor: &mut Cursor<R>,
+        start_pos: Position,
+        strict_mode: bool,
+    ) -> Result<Token, Error>
     where
         R: Read,
     {

6 changes: 5 additions & 1 deletion boa/src/syntax/parser/cursor/buffered_lexer/mod.rs
@@ -79,7 +79,11 @@ where
     pub(super) fn lex_regex(&mut self, start: Position) -> Result<Token, ParseError> {
         let _timer = BoaProfiler::global().start_event("cursor::lex_regex()", "Parsing");
         self.set_goal(InputElement::RegExp);
-        self.lexer.lex_slash_token(start).map_err(|e| e.into())
+
+        let strict_mode: bool = false; // TODO enable setting strict mode on/off.
+        self.lexer
+            .lex_slash_token(start, strict_mode)
+            .map_err(|e| e.into())
     }
 
     /// Fills the peeking buffer with the next token.
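The parser does not yet track strict mode, so `lex_regex` passes a hard-coded `false`, as the TODO notes. One hypothetical shape for that follow-up (not part of this commit) would be to keep the flag on the buffered lexer and let the parser flip it when it encounters a `"use strict"` directive:

    // Hypothetical follow-up sketch (NOT in this commit): store the flag where
    // the parser can toggle it, then pass self.strict_mode into lex_slash_token.
    struct BufferedLexer {
        strict_mode: bool,
    }

    impl BufferedLexer {
        fn set_strict_mode(&mut self, strict_mode: bool) {
            self.strict_mode = strict_mode;
        }

        fn strict_mode(&self) -> bool {
            self.strict_mode
        }
    }
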
