diff --git a/src/doc/guide-plugin.md b/src/doc/guide-plugin.md index 1145235c5f899..eb3e4ce75c470 100644 --- a/src/doc/guide-plugin.md +++ b/src/doc/guide-plugin.md @@ -55,7 +55,7 @@ extern crate syntax; extern crate rustc; use syntax::codemap::Span; -use syntax::parse::token::{IDENT, get_ident}; +use syntax::parse::token; use syntax::ast::{TokenTree, TtToken}; use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr}; use syntax::ext::build::AstBuilder; // trait for expr_uint @@ -71,7 +71,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) ("I", 1)]; let text = match args { - [TtToken(_, IDENT(s, _))] => get_ident(s).to_string(), + [TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(), _ => { cx.span_err(sp, "argument should be a single identifier"); return DummyResult::any(sp); diff --git a/src/grammar/verify.rs b/src/grammar/verify.rs index a95eeb3c97dc2..16abf5160fa5b 100644 --- a/src/grammar/verify.rs +++ b/src/grammar/verify.rs @@ -30,12 +30,12 @@ use rustc::driver::{session, config}; use syntax::ast; use syntax::ast::Name; -use syntax::parse::token::*; +use syntax::parse::token; use syntax::parse::lexer::TokenAndSpan; fn parse_token_list(file: &str) -> HashMap { fn id() -> Token { - IDENT(ast::Ident { name: Name(0), ctxt: 0, }, false) + token::Ident(ast::Ident { name: Name(0), ctxt: 0, }, false) } let mut res = HashMap::new(); @@ -52,64 +52,64 @@ fn parse_token_list(file: &str) -> HashMap { let num = line.slice_from(eq + 1); let tok = match val { - "SHR" => BINOP(SHR), - "DOLLAR" => DOLLAR, - "LT" => LT, - "STAR" => BINOP(STAR), - "FLOAT_SUFFIX" => id(), - "INT_SUFFIX" => id(), - "SHL" => BINOP(SHL), - "LBRACE" => LBRACE, - "RARROW" => RARROW, - "LIT_STR" => LIT_STR(Name(0)), - "DOTDOT" => DOTDOT, - "MOD_SEP" => MOD_SEP, - "DOTDOTDOT" => DOTDOTDOT, - "NOT" => NOT, - "AND" => BINOP(AND), - "LPAREN" => LPAREN, - "ANDAND" => ANDAND, - "AT" => AT, - "LBRACKET" => LBRACKET, - "LIT_STR_RAW" => LIT_STR_RAW(Name(0), 0), - "RPAREN" 
=> RPAREN, - "SLASH" => BINOP(SLASH), - "COMMA" => COMMA, - "LIFETIME" => LIFETIME(ast::Ident { name: Name(0), ctxt: 0 }), - "CARET" => BINOP(CARET), - "TILDE" => TILDE, - "IDENT" => id(), - "PLUS" => BINOP(PLUS), - "LIT_CHAR" => LIT_CHAR(Name(0)), - "LIT_BYTE" => LIT_BYTE(Name(0)), - "EQ" => EQ, - "RBRACKET" => RBRACKET, - "COMMENT" => COMMENT, - "DOC_COMMENT" => DOC_COMMENT(Name(0)), - "DOT" => DOT, - "EQEQ" => EQEQ, - "NE" => NE, - "GE" => GE, - "PERCENT" => BINOP(PERCENT), - "RBRACE" => RBRACE, - "BINOP" => BINOP(PLUS), - "POUND" => POUND, - "OROR" => OROR, - "LIT_INTEGER" => LIT_INTEGER(Name(0)), - "BINOPEQ" => BINOPEQ(PLUS), - "LIT_FLOAT" => LIT_FLOAT(Name(0)), - "WHITESPACE" => WS, - "UNDERSCORE" => UNDERSCORE, - "MINUS" => BINOP(MINUS), - "SEMI" => SEMI, - "COLON" => COLON, - "FAT_ARROW" => FAT_ARROW, - "OR" => BINOP(OR), - "GT" => GT, - "LE" => LE, - "LIT_BINARY" => LIT_BINARY(Name(0)), - "LIT_BINARY_RAW" => LIT_BINARY_RAW(Name(0), 0), - _ => continue + "SHR" => token::BinOp(token::Shr), + "DOLLAR" => token::Dollar, + "LT" => token::Lt, + "STAR" => token::BinOp(token::Star), + "FLOAT_SUFFIX" => id(), + "INT_SUFFIX" => id(), + "SHL" => token::BinOp(token::Shl), + "LBRACE" => token::LBrace, + "RARROW" => token::RArrow, + "LIT_STR" => token::LitStr(Name(0)), + "DOTDOT" => token::DotDot, + "MOD_SEP" => token::ModSep, + "DOTDOTDOT" => token::DotDotDot, + "NOT" => token::Not, + "AND" => token::BinOp(token::And), + "LPAREN" => token::LParen, + "ANDAND" => token::AndAnd, + "AT" => token::At, + "LBRACKET" => token::LBracket, + "LIT_STR_RAW" => token::LitStrRaw(Name(0), 0), + "RPAREN" => token::RParen, + "SLASH" => token::BinOp(token::Slash), + "COMMA" => token::Comma, + "LIFETIME" => token::Lifetime(ast::Ident { name: Name(0), ctxt: 0 }), + "CARET" => token::BinOp(token::Caret), + "TILDE" => token::Tilde, + "IDENT" => id(), + "PLUS" => token::BinOp(token::Plus), + "LIT_CHAR" => token::LitChar(Name(0)), + "LIT_BYTE" => token::LitByte(Name(0)), + "EQ" =>
token::Eq, + "RBRACKET" => token::RBracket, + "COMMENT" => token::Comment, + "DOC_COMMENT" => token::DocComment(Name(0)), + "DOT" => token::Dot, + "EQEQ" => token::EqEq, + "NE" => token::Ne, + "GE" => token::Ge, + "PERCENT" => token::BinOp(token::Percent), + "RBRACE" => token::RBrace, + "BINOP" => token::BinOp(token::Plus), + "POUND" => token::Pound, + "OROR" => token::OrOr, + "LIT_INTEGER" => token::LitInteger(Name(0)), + "BINOPEQ" => token::BinOpEq(token::Plus), + "LIT_FLOAT" => token::LitFloat(Name(0)), + "WHITESPACE" => token::Whitespace, + "UNDERSCORE" => token::Underscore, + "MINUS" => token::BinOp(token::Minus), + "SEMI" => token::Semi, + "COLON" => token::Colon, + "FAT_ARROW" => token::FatArrow, + "OR" => token::BinOp(token::Or), + "GT" => token::Gt, + "LE" => token::Le, + "LIT_BINARY" => token::LitBinary(Name(0)), + "LIT_BINARY_RAW" => token::LitBinaryRaw(Name(0), 0), + _ => continue, }; res.insert(num.to_string(), tok); @@ -119,19 +119,19 @@ fn parse_token_list(file: &str) -> HashMap { res } -fn str_to_binop(s: &str) -> BinOp { +fn str_to_binop(s: &str) -> BinOpToken { match s { - "+" => PLUS, - "/" => SLASH, - "-" => MINUS, - "*" => STAR, - "%" => PERCENT, - "^" => CARET, - "&" => AND, - "|" => OR, - "<<" => SHL, - ">>" => SHR, - _ => fail!("Bad binop str `{}`", s) + "+" => token::Plus, + "/" => token::Slash, + "-" => token::Minus, + "*" => token::Star, + "%" => token::Percent, + "^" => token::Caret, + "&" => token::And, + "|" => token::Or, + "<<" => token::Shl, + ">>" => token::Shr, + _ => fail!("Bad binop str `{}`", s), } } @@ -186,19 +186,20 @@ fn parse_antlr_token(s: &str, tokens: &HashMap) -> TokenAndSpan { debug!("What we got: content (`{}`), proto: {}", content, proto_tok); let real_tok = match *proto_tok { - BINOP(..) => BINOP(str_to_binop(content)), - BINOPEQ(..) => BINOPEQ(str_to_binop(content.slice_to(content.len() - 1))), - LIT_STR(..) => LIT_STR(fix(content)), - LIT_STR_RAW(..) => LIT_STR_RAW(fix(content), count(content)), - LIT_CHAR(..) 
=> LIT_CHAR(fixchar(content)), - LIT_BYTE(..) => LIT_BYTE(fixchar(content)), - DOC_COMMENT(..) => DOC_COMMENT(nm), - LIT_INTEGER(..) => LIT_INTEGER(nm), - LIT_FLOAT(..) => LIT_FLOAT(nm), - LIT_BINARY(..) => LIT_BINARY(nm), - LIT_BINARY_RAW(..) => LIT_BINARY_RAW(fix(content), count(content)), - IDENT(..) => IDENT(ast::Ident { name: nm, ctxt: 0 }, true), - LIFETIME(..) => LIFETIME(ast::Ident { name: nm, ctxt: 0 }), + token::BinOp(..) => token::BinOp(str_to_binop(content)), + token::BinOpEq(..) => token::BinOpEq(str_to_binop(content.slice_to( + content.len() - 1))), + token::LitStr(..) => token::LitStr(fix(content)), + token::LitStrRaw(..) => token::LitStrRaw(fix(content), count(content)), + token::LitChar(..) => token::LitChar(fixchar(content)), + token::LitByte(..) => token::LitByte(fixchar(content)), + token::DocComment(..) => token::DocComment(nm), + token::LitInteger(..) => token::LitInteger(nm), + token::LitFloat(..) => token::LitFloat(nm), + token::LitBinary(..) => token::LitBinary(nm), + token::LitBinaryRaw(..) => token::LitBinaryRaw(fix(content), count(content)), + token::Ident(..) => token::Ident(ast::Ident { name: nm, ctxt: 0 }, true), + token::Lifetime(..) => token::Lifetime(ast::Ident { name: nm, ctxt: 0 }), ref t => t.clone() }; @@ -222,8 +223,8 @@ fn parse_antlr_token(s: &str, tokens: &HashMap) -> TokenAndSpan { fn tok_cmp(a: &Token, b: &Token) -> bool { match a { - &IDENT(id, _) => match b { - &IDENT(id2, _) => id == id2, + &token::Ident(id, _) => match b { + &token::Ident(id2, _) => id == id2, _ => false }, _ => a == b @@ -281,19 +282,20 @@ fn main() { ) ) - matches!(LIT_BYTE(..), - LIT_CHAR(..), - LIT_INTEGER(..), - LIT_FLOAT(..), - LIT_STR(..), - LIT_STR_RAW(..), - LIT_BINARY(..), - LIT_BINARY_RAW(..), - IDENT(..), - LIFETIME(..), - INTERPOLATED(..), - DOC_COMMENT(..), - SHEBANG(..) 
+ matches!( + token::LitByte(..), + token::LitChar(..), + token::LitInteger(..), + token::LitFloat(..), + token::LitStr(..), + token::LitStrRaw(..), + token::LitBinary(..), + token::LitBinaryRaw(..), + token::Ident(..), + token::Lifetime(..), + token::Interpolated(..), + token::DocComment(..), + token::Shebang(..) ); } } diff --git a/src/libregex_macros/lib.rs b/src/libregex_macros/lib.rs index 04c0e7cc21fb1..28b18ef0bf901 100644 --- a/src/libregex_macros/lib.rs +++ b/src/libregex_macros/lib.rs @@ -634,7 +634,7 @@ fn parse(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option { return None } }; - if !parser.eat(&token::EOF) { + if !parser.eat(&token::Eof) { cx.span_err(parser.span, "only one string literal allowed"); return None; } diff --git a/src/librustc/middle/save/mod.rs b/src/librustc/middle/save/mod.rs index 47c596f347495..4748de01240cf 100644 --- a/src/librustc/middle/save/mod.rs +++ b/src/librustc/middle/save/mod.rs @@ -428,7 +428,7 @@ impl <'l, 'tcx> DxrVisitor<'l, 'tcx> { let qualname = format!("{}::{}", qualname, name); let typ = ppaux::ty_to_string(&self.analysis.ty_cx, (*self.analysis.ty_cx.node_types.borrow())[field.node.id as uint]); - match self.span.sub_span_before_token(field.span, token::COLON) { + match self.span.sub_span_before_token(field.span, token::Colon) { Some(sub_span) => self.fmt.field_str(field.span, Some(sub_span), field.node.id, @@ -1175,7 +1175,7 @@ impl<'l, 'tcx, 'v> Visitor<'v> for DxrVisitor<'l, 'tcx> { // 'use' always introduces an alias, if there is not an explicit // one, there is an implicit one.
let sub_span = - match self.span.sub_span_before_token(path.span, token::EQ) { + match self.span.sub_span_before_token(path.span, token::Eq) { Some(sub_span) => Some(sub_span), None => sub_span, }; diff --git a/src/librustc/middle/save/span_utils.rs b/src/librustc/middle/save/span_utils.rs index 10832572ae255..08567dba3a4fa 100644 --- a/src/librustc/middle/save/span_utils.rs +++ b/src/librustc/middle/save/span_utils.rs @@ -93,7 +93,7 @@ impl<'a> SpanUtils<'a> { let mut bracket_count = 0u; loop { let ts = toks.next_token(); - if ts.tok == token::EOF { + if ts.tok == token::Eof { return self.make_sub_span(span, result) } if bracket_count == 0 && @@ -102,9 +102,9 @@ impl<'a> SpanUtils<'a> { } bracket_count += match ts.tok { - token::LT => 1, - token::GT => -1, - token::BINOP(token::SHR) => -2, + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shr) => -2, _ => 0 } } @@ -116,7 +116,7 @@ impl<'a> SpanUtils<'a> { let mut bracket_count = 0u; loop { let ts = toks.next_token(); - if ts.tok == token::EOF { + if ts.tok == token::Eof { return None; } if bracket_count == 0 && @@ -125,9 +125,9 @@ impl<'a> SpanUtils<'a> { } bracket_count += match ts.tok { - token::LT => 1, - token::GT => -1, - token::BINOP(token::SHR) => -2, + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shr) => -2, _ => 0 } } @@ -141,32 +141,32 @@ impl<'a> SpanUtils<'a> { let mut result = None; let mut bracket_count = 0u; let mut last_span = None; - while prev.tok != token::EOF { + while prev.tok != token::Eof { last_span = None; let mut next = toks.next_token(); - if (next.tok == token::LPAREN || - next.tok == token::LT) && + if (next.tok == token::LParen || + next.tok == token::Lt) && bracket_count == 0 && is_ident(&prev.tok) { result = Some(prev.sp); } if bracket_count == 0 && - next.tok == token::MOD_SEP { + next.tok == token::ModSep { let old = prev; prev = next; next = toks.next_token(); - if next.tok == token::LT && + if next.tok == token::Lt && is_ident(&old.tok) { result = 
Some(old.sp); } } bracket_count += match prev.tok { - token::LPAREN | token::LT => 1, - token::RPAREN | token::GT => -1, - token::BINOP(token::SHR) => -2, + token::LParen | token::Lt => 1, + token::RParen | token::Gt => -1, + token::BinOp(token::Shr) => -2, _ => 0 }; @@ -191,21 +191,21 @@ impl<'a> SpanUtils<'a> { loop { let next = toks.next_token(); - if (next.tok == token::LT || - next.tok == token::COLON) && + if (next.tok == token::Lt || + next.tok == token::Colon) && bracket_count == 0 && is_ident(&prev.tok) { result = Some(prev.sp); } bracket_count += match prev.tok { - token::LT => 1, - token::GT => -1, - token::BINOP(token::SHR) => -2, + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shr) => -2, _ => 0 }; - if next.tok == token::EOF { + if next.tok == token::Eof { break; } prev = next; @@ -235,7 +235,7 @@ impl<'a> SpanUtils<'a> { let mut bracket_count = 0i; loop { let ts = toks.next_token(); - if ts.tok == token::EOF { + if ts.tok == token::Eof { if bracket_count != 0 { let loc = self.sess.codemap().lookup_char_pos(span.lo); self.sess.span_bug(span, format!( @@ -248,10 +248,10 @@ impl<'a> SpanUtils<'a> { return result; } bracket_count += match ts.tok { - token::LT => 1, - token::GT => -1, - token::BINOP(token::SHL) => 2, - token::BINOP(token::SHR) => -2, + token::Lt => 1, + token::Gt => -1, + token::BinOp(token::Shl) => 2, + token::BinOp(token::Shr) => -2, _ => 0 }; if is_ident(&ts.tok) && @@ -265,7 +265,7 @@ impl<'a> SpanUtils<'a> { let mut toks = self.retokenise_span(span); let mut prev = toks.next_token(); loop { - if prev.tok == token::EOF { + if prev.tok == token::Eof { return None; } let next = toks.next_token(); @@ -282,12 +282,12 @@ impl<'a> SpanUtils<'a> { let mut toks = self.retokenise_span(span); loop { let ts = toks.next_token(); - if ts.tok == token::EOF { + if ts.tok == token::Eof { return None; } if is_keyword(keyword, &ts.tok) { let ts = toks.next_token(); - if ts.tok == token::EOF { + if ts.tok == token::Eof { return None } else { 
return self.make_sub_span(span, Some(ts.sp)); diff --git a/src/librustdoc/html/highlight.rs b/src/librustdoc/html/highlight.rs index 85455b9df9ed1..481cd19739477 100644 --- a/src/librustdoc/html/highlight.rs +++ b/src/librustdoc/html/highlight.rs @@ -17,7 +17,7 @@ use html::escape::Escape; use std::io; use syntax::parse::lexer; -use syntax::parse::token as t; +use syntax::parse::token; use syntax::parse; /// Highlights some source code, returning the HTML output. @@ -63,19 +63,19 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, let snip = |sp| sess.span_diagnostic.cm.span_to_snippet(sp).unwrap(); - if next.tok == t::EOF { break } + if next.tok == token::Eof { break } let klass = match next.tok { - t::WS => { + token::Whitespace => { try!(write!(out, "{}", Escape(snip(next.sp).as_slice()))); continue }, - t::COMMENT => { + token::Comment => { try!(write!(out, "{}", Escape(snip(next.sp).as_slice()))); continue }, - t::SHEBANG(s) => { + token::Shebang(s) => { try!(write!(out, "{}", Escape(s.as_str()))); continue }, @@ -83,24 +83,25 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, // that it's the address-of operator instead of the and-operator. // This allows us to give all pointers their own class (`Box` and // `@` are below). - t::BINOP(t::AND) if lexer.peek().sp.lo == next.sp.hi => "kw-2", - t::AT | t::TILDE => "kw-2", + token::BinOp(token::And) if lexer.peek().sp.lo == next.sp.hi => "kw-2", + token::At | token::Tilde => "kw-2", // consider this as part of a macro invocation if there was a // leading identifier - t::NOT if is_macro => { is_macro = false; "macro" } + token::Not if is_macro => { is_macro = false; "macro" } // operators - t::EQ | t::LT | t::LE | t::EQEQ | t::NE | t::GE | t::GT | - t::ANDAND | t::OROR | t::NOT | t::BINOP(..) | t::RARROW | - t::BINOPEQ(..) 
| t::FAT_ARROW => "op", + token::Eq | token::Lt | token::Le | token::EqEq | token::Ne | token::Ge | token::Gt | + token::AndAnd | token::OrOr | token::Not | token::BinOp(..) | token::RArrow | + token::BinOpEq(..) | token::FatArrow => "op", // miscellaneous, no highlighting - t::DOT | t::DOTDOT | t::DOTDOTDOT | t::COMMA | t::SEMI | - t::COLON | t::MOD_SEP | t::LARROW | t::LPAREN | - t::RPAREN | t::LBRACKET | t::LBRACE | t::RBRACE | t::QUESTION => "", - t::DOLLAR => { - if t::is_ident(&lexer.peek().tok) { + token::Dot | token::DotDot | token::DotDotDot | token::Comma | token::Semi | + token::Colon | token::ModSep | token::LArrow | token::LParen | + token::RParen | token::LBracket | token::LBrace | token::RBrace | + token::Question => "", + token::Dollar => { + if token::is_ident(&lexer.peek().tok) { is_macro_nonterminal = true; "macro-nonterminal" } else { @@ -112,12 +113,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, // continue highlighting it as an attribute until the ending ']' is // seen, so skip out early. Down below we terminate the attribute // span when we see the ']'. - t::POUND => { + token::Pound => { is_attribute = true; try!(write!(out, r"#")); continue } - t::RBRACKET => { + token::RBracket => { if is_attribute { is_attribute = false; try!(write!(out, "]")); @@ -128,15 +129,15 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, } // text literals - t::LIT_BYTE(..) | t::LIT_BINARY(..) | t::LIT_BINARY_RAW(..) | - t::LIT_CHAR(..) | t::LIT_STR(..) | t::LIT_STR_RAW(..) => "string", + token::LitByte(..) | token::LitBinary(..) | token::LitBinaryRaw(..) | + token::LitChar(..) | token::LitStr(..) | token::LitStrRaw(..) => "string", // number literals - t::LIT_INTEGER(..) | t::LIT_FLOAT(..) => "number", + token::LitInteger(..) | token::LitFloat(..) 
=> "number", // keywords are also included in the identifier set - t::IDENT(ident, _is_mod_sep) => { - match t::get_ident(ident).get() { + token::Ident(ident, _is_mod_sep) => { + match token::get_ident(ident).get() { "ref" | "mut" => "kw-2", "self" => "self", @@ -145,12 +146,12 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, "Option" | "Result" => "prelude-ty", "Some" | "None" | "Ok" | "Err" => "prelude-val", - _ if t::is_any_keyword(&next.tok) => "kw", + _ if token::is_any_keyword(&next.tok) => "kw", _ => { if is_macro_nonterminal { is_macro_nonterminal = false; "macro-nonterminal" - } else if lexer.peek().tok == t::NOT { + } else if lexer.peek().tok == token::Not { is_macro = true; "macro" } else { @@ -160,9 +161,9 @@ fn doit(sess: &parse::ParseSess, mut lexer: lexer::StringReader, } } - t::LIFETIME(..) => "lifetime", - t::DOC_COMMENT(..) => "doccomment", - t::UNDERSCORE | t::EOF | t::INTERPOLATED(..) => "", + token::Lifetime(..) => "lifetime", + token::DocComment(..) => "doccomment", + token::Underscore | token::Eof | token::Interpolated(..) 
=> "", }; // as mentioned above, use the original source code instead of diff --git a/src/libsyntax/diagnostics/plugin.rs b/src/libsyntax/diagnostics/plugin.rs index b8795ad5be80f..d9d549f684125 100644 --- a/src/libsyntax/diagnostics/plugin.rs +++ b/src/libsyntax/diagnostics/plugin.rs @@ -50,7 +50,7 @@ pub fn expand_diagnostic_used<'cx>(ecx: &'cx mut ExtCtxt, token_tree: &[TokenTree]) -> Box { let code = match token_tree { - [ast::TtToken(_, token::IDENT(code, _))] => code, + [ast::TtToken(_, token::Ident(code, _))] => code, _ => unreachable!() }; with_registered_diagnostics(|diagnostics| { @@ -82,12 +82,12 @@ pub fn expand_register_diagnostic<'cx>(ecx: &'cx mut ExtCtxt, token_tree: &[TokenTree]) -> Box { let (code, description) = match token_tree { - [ast::TtToken(_, token::IDENT(ref code, _))] => { + [ast::TtToken(_, token::Ident(ref code, _))] => { (code, None) }, - [ast::TtToken(_, token::IDENT(ref code, _)), - ast::TtToken(_, token::COMMA), - ast::TtToken(_, token::LIT_STR_RAW(description, _))] => { + [ast::TtToken(_, token::Ident(ref code, _)), + ast::TtToken(_, token::Comma), + ast::TtToken(_, token::LitStrRaw(description, _))] => { (code, Some(description)) } _ => unreachable!() @@ -110,7 +110,7 @@ pub fn expand_build_diagnostic_array<'cx>(ecx: &'cx mut ExtCtxt, token_tree: &[TokenTree]) -> Box { let name = match token_tree { - [ast::TtToken(_, token::IDENT(ref name, _))] => name, + [ast::TtToken(_, token::Ident(ref name, _))] => name, _ => unreachable!() }; diff --git a/src/libsyntax/ext/asm.rs b/src/libsyntax/ext/asm.rs index 702be0c0eeede..2b52b7feaccd0 100644 --- a/src/libsyntax/ext/asm.rs +++ b/src/libsyntax/ext/asm.rs @@ -72,21 +72,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) asm_str_style = Some(style); } Outputs => { - while p.token != token::EOF && - p.token != token::COLON && - p.token != token::MOD_SEP { + while p.token != token::Eof && + p.token != token::Colon && + p.token != token::ModSep { if 
outputs.len() != 0 { - p.eat(&token::COMMA); + p.eat(&token::Comma); } let (constraint, _str_style) = p.parse_str(); let span = p.last_span; - p.expect(&token::LPAREN); + p.expect(&token::LParen); let out = p.parse_expr(); - p.expect(&token::RPAREN); + p.expect(&token::RParen); // Expands a read+write operand into two operands. // @@ -113,12 +113,12 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) } } Inputs => { - while p.token != token::EOF && - p.token != token::COLON && - p.token != token::MOD_SEP { + while p.token != token::Eof && + p.token != token::Colon && + p.token != token::ModSep { if inputs.len() != 0 { - p.eat(&token::COMMA); + p.eat(&token::Comma); } let (constraint, _str_style) = p.parse_str(); @@ -129,21 +129,21 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) cx.span_err(p.last_span, "input operand constraint contains '+'"); } - p.expect(&token::LPAREN); + p.expect(&token::LParen); let input = p.parse_expr(); - p.expect(&token::RPAREN); + p.expect(&token::RParen); inputs.push((constraint, input)); } } Clobbers => { let mut clobs = Vec::new(); - while p.token != token::EOF && - p.token != token::COLON && - p.token != token::MOD_SEP { + while p.token != token::Eof && + p.token != token::Colon && + p.token != token::ModSep { if clobs.len() != 0 { - p.eat(&token::COMMA); + p.eat(&token::Comma); } let (s, _str_style) = p.parse_str(); @@ -172,8 +172,8 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) cx.span_warn(p.last_span, "unrecognized option"); } - if p.token == token::COMMA { - p.eat(&token::COMMA); + if p.token == token::Comma { + p.eat(&token::Comma); } } StateNone => () @@ -183,17 +183,17 @@ pub fn expand_asm<'cx>(cx: &'cx mut ExtCtxt, sp: Span, tts: &[ast::TokenTree]) // MOD_SEP is a double colon '::' without space in between. // When encountered, the state must be advanced twice. 
match (&p.token, state.next(), state.next().next()) { - (&token::COLON, StateNone, _) | - (&token::MOD_SEP, _, StateNone) => { + (&token::Colon, StateNone, _) | + (&token::ModSep, _, StateNone) => { p.bump(); break 'statement; } - (&token::COLON, st, _) | - (&token::MOD_SEP, _, st) => { + (&token::Colon, st, _) | + (&token::ModSep, _, st) => { p.bump(); state = st; } - (&token::EOF, _, _) => break 'statement, + (&token::Eof, _, _) => break 'statement, _ => break } } diff --git a/src/libsyntax/ext/base.rs b/src/libsyntax/ext/base.rs index 64c8068607aa0..a8326e79ef368 100644 --- a/src/libsyntax/ext/base.rs +++ b/src/libsyntax/ext/base.rs @@ -684,8 +684,8 @@ pub fn get_single_str_from_tts(cx: &ExtCtxt, cx.span_err(sp, format!("{} takes 1 argument.", name).as_slice()); } else { match tts[0] { - ast::TtToken(_, token::LIT_STR(ident)) => return Some(parse::str_lit(ident.as_str())), - ast::TtToken(_, token::LIT_STR_RAW(ident, _)) => { + ast::TtToken(_, token::LitStr(ident)) => return Some(parse::str_lit(ident.as_str())), + ast::TtToken(_, token::LitStrRaw(ident, _)) => { return Some(parse::raw_str_lit(ident.as_str())) } _ => { @@ -704,12 +704,12 @@ pub fn get_exprs_from_tts(cx: &mut ExtCtxt, tts: &[ast::TokenTree]) -> Option>> { let mut p = cx.new_parser_from_tts(tts); let mut es = Vec::new(); - while p.token != token::EOF { + while p.token != token::Eof { es.push(cx.expander().fold_expr(p.parse_expr())); - if p.eat(&token::COMMA) { + if p.eat(&token::Comma) { continue; } - if p.token != token::EOF { + if p.token != token::Eof { cx.span_err(sp, "expected token: `,`"); return None; } diff --git a/src/libsyntax/ext/cfg.rs b/src/libsyntax/ext/cfg.rs index f697acb417de7..72da60ffe0941 100644 --- a/src/libsyntax/ext/cfg.rs +++ b/src/libsyntax/ext/cfg.rs @@ -29,7 +29,7 @@ pub fn expand_cfg<'cx>(cx: &mut ExtCtxt, let mut p = cx.new_parser_from_tts(tts); let cfg = p.parse_meta_item(); - if !p.eat(&token::EOF) { + if !p.eat(&token::Eof) { cx.span_err(sp, "expected 1 cfg-pattern"); 
return DummyResult::expr(sp); } diff --git a/src/libsyntax/ext/concat_idents.rs b/src/libsyntax/ext/concat_idents.rs index e12f9ee133a32..e5e93a7d8b3bb 100644 --- a/src/libsyntax/ext/concat_idents.rs +++ b/src/libsyntax/ext/concat_idents.rs @@ -23,21 +23,21 @@ pub fn expand_syntax_ext<'cx>(cx: &mut ExtCtxt, sp: Span, tts: &[ast::TokenTree] for (i, e) in tts.iter().enumerate() { if i & 1 == 1 { match *e { - ast::TtToken(_, token::COMMA) => (), + ast::TtToken(_, token::Comma) => {}, _ => { cx.span_err(sp, "concat_idents! expecting comma."); return DummyResult::expr(sp); - } + }, } } else { match *e { - ast::TtToken(_, token::IDENT(ident,_)) => { + ast::TtToken(_, token::Ident(ident, _)) => { res_str.push_str(token::get_ident(ident).get()) - } + }, _ => { cx.span_err(sp, "concat_idents! requires ident args."); return DummyResult::expr(sp); - } + }, } } } diff --git a/src/libsyntax/ext/format.rs b/src/libsyntax/ext/format.rs index 87cd61c9b2237..fdf61d4abd9da 100644 --- a/src/libsyntax/ext/format.rs +++ b/src/libsyntax/ext/format.rs @@ -91,7 +91,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool, // Parse the leading function expression (maybe a block, maybe a path) let invocation = if allow_method { let e = p.parse_expr(); - if !p.eat(&token::COMMA) { + if !p.eat(&token::Comma) { ecx.span_err(sp, "expected token: `,`"); return (Call(e), None); } @@ -99,28 +99,28 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool, } else { Call(p.parse_expr()) }; - if !p.eat(&token::COMMA) { + if !p.eat(&token::Comma) { ecx.span_err(sp, "expected token: `,`"); return (invocation, None); } - if p.token == token::EOF { + if p.token == token::Eof { ecx.span_err(sp, "requires at least a format string argument"); return (invocation, None); } let fmtstr = p.parse_expr(); let mut named = false; - while p.token != token::EOF { - if !p.eat(&token::COMMA) { + while p.token != token::Eof { + if !p.eat(&token::Comma) { ecx.span_err(sp, "expected token: `,`"); return 
(invocation, None); } - if p.token == token::EOF { break } // accept trailing commas + if p.token == token::Eof { break } // accept trailing commas if named || (token::is_ident(&p.token) && - p.look_ahead(1, |t| *t == token::EQ)) { + p.look_ahead(1, |t| *t == token::Eq)) { named = true; let ident = match p.token { - token::IDENT(i, _) => { + token::Ident(i, _) => { p.bump(); i } @@ -139,7 +139,7 @@ fn parse_args(ecx: &mut ExtCtxt, sp: Span, allow_method: bool, }; let interned_name = token::get_ident(ident); let name = interned_name.get(); - p.expect(&token::EQ); + p.expect(&token::Eq); let e = p.parse_expr(); match names.find_equiv(&name) { None => {} diff --git a/src/libsyntax/ext/quote.rs b/src/libsyntax/ext/quote.rs index 6f1fd90adfa4b..39a538f917b00 100644 --- a/src/libsyntax/ext/quote.rs +++ b/src/libsyntax/ext/quote.rs @@ -515,123 +515,122 @@ fn mk_token_path(cx: &ExtCtxt, sp: Span, name: &str) -> P { cx.expr_path(cx.path_global(sp, idents)) } -fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOp) -> P { +fn mk_binop(cx: &ExtCtxt, sp: Span, bop: token::BinOpToken) -> P { let name = match bop { - PLUS => "PLUS", - MINUS => "MINUS", - STAR => "STAR", - SLASH => "SLASH", - PERCENT => "PERCENT", - CARET => "CARET", - AND => "AND", - OR => "OR", - SHL => "SHL", - SHR => "SHR" + token::Plus => "Plus", + token::Minus => "Minus", + token::Star => "Star", + token::Slash => "Slash", + token::Percent => "Percent", + token::Caret => "Caret", + token::And => "And", + token::Or => "Or", + token::Shl => "Shl", + token::Shr => "Shr" }; mk_token_path(cx, sp, name) } fn mk_token(cx: &ExtCtxt, sp: Span, tok: &token::Token) -> P { - match *tok { - BINOP(binop) => { - return cx.expr_call(sp, mk_token_path(cx, sp, "BINOP"), vec!(mk_binop(cx, sp, binop))); + token::BinOp(binop) => { + return cx.expr_call(sp, mk_token_path(cx, sp, "BinOp"), vec!(mk_binop(cx, sp, binop))); } - BINOPEQ(binop) => { - return cx.expr_call(sp, mk_token_path(cx, sp, "BINOPEQ"), + token::BinOpEq(binop) => 
{ + return cx.expr_call(sp, mk_token_path(cx, sp, "BinOpEq"), vec!(mk_binop(cx, sp, binop))); } - LIT_BYTE(i) => { + token::LitByte(i) => { let e_byte = mk_name(cx, sp, i.ident()); - return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_BYTE"), vec!(e_byte)); + return cx.expr_call(sp, mk_token_path(cx, sp, "LitByte"), vec!(e_byte)); } - LIT_CHAR(i) => { + token::LitChar(i) => { let e_char = mk_name(cx, sp, i.ident()); - return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_CHAR"), vec!(e_char)); + return cx.expr_call(sp, mk_token_path(cx, sp, "LitChar"), vec!(e_char)); } - LIT_INTEGER(i) => { + token::LitInteger(i) => { let e_int = mk_name(cx, sp, i.ident()); - return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_INTEGER"), vec!(e_int)); + return cx.expr_call(sp, mk_token_path(cx, sp, "LitInteger"), vec!(e_int)); } - LIT_FLOAT(fident) => { + token::LitFloat(fident) => { let e_fident = mk_name(cx, sp, fident.ident()); - return cx.expr_call(sp, mk_token_path(cx, sp, "LIT_FLOAT"), vec!(e_fident)); + return cx.expr_call(sp, mk_token_path(cx, sp, "LitFloat"), vec!(e_fident)); } - LIT_STR(ident) => { + token::LitStr(ident) => { return cx.expr_call(sp, - mk_token_path(cx, sp, "LIT_STR"), + mk_token_path(cx, sp, "LitStr"), vec!(mk_name(cx, sp, ident.ident()))); } - LIT_STR_RAW(ident, n) => { + token::LitStrRaw(ident, n) => { return cx.expr_call(sp, - mk_token_path(cx, sp, "LIT_STR_RAW"), + mk_token_path(cx, sp, "LitStrRaw"), vec!(mk_name(cx, sp, ident.ident()), cx.expr_uint(sp, n))); } - IDENT(ident, b) => { + token::Ident(ident, b) => { return cx.expr_call(sp, - mk_token_path(cx, sp, "IDENT"), + mk_token_path(cx, sp, "Ident"), vec!(mk_ident(cx, sp, ident), cx.expr_bool(sp, b))); } - LIFETIME(ident) => { + token::Lifetime(ident) => { return cx.expr_call(sp, - mk_token_path(cx, sp, "LIFETIME"), + mk_token_path(cx, sp, "Lifetime"), vec!(mk_ident(cx, sp, ident))); } - DOC_COMMENT(ident) => { + token::DocComment(ident) => { return cx.expr_call(sp, - mk_token_path(cx, sp, 
"DOC_COMMENT"), + mk_token_path(cx, sp, "DocComment"), vec!(mk_name(cx, sp, ident.ident()))); } - INTERPOLATED(_) => fail!("quote! with interpolated token"), + token::Interpolated(_) => fail!("quote! with interpolated token"), _ => () } let name = match *tok { - EQ => "EQ", - LT => "LT", - LE => "LE", - EQEQ => "EQEQ", - NE => "NE", - GE => "GE", - GT => "GT", - ANDAND => "ANDAND", - OROR => "OROR", - NOT => "NOT", - TILDE => "TILDE", - AT => "AT", - DOT => "DOT", - DOTDOT => "DOTDOT", - COMMA => "COMMA", - SEMI => "SEMI", - COLON => "COLON", - MOD_SEP => "MOD_SEP", - RARROW => "RARROW", - LARROW => "LARROW", - FAT_ARROW => "FAT_ARROW", - LPAREN => "LPAREN", - RPAREN => "RPAREN", - LBRACKET => "LBRACKET", - RBRACKET => "RBRACKET", - LBRACE => "LBRACE", - RBRACE => "RBRACE", - POUND => "POUND", - DOLLAR => "DOLLAR", - UNDERSCORE => "UNDERSCORE", - EOF => "EOF", - _ => fail!() + token::Eq => "Eq", + token::Lt => "Lt", + token::Le => "Le", + token::EqEq => "EqEq", + token::Ne => "Ne", + token::Ge => "Ge", + token::Gt => "Gt", + token::AndAnd => "AndAnd", + token::OrOr => "OrOr", + token::Not => "Not", + token::Tilde => "Tilde", + token::At => "At", + token::Dot => "Dot", + token::DotDot => "DotDot", + token::Comma => "Comma", + token::Semi => "Semi", + token::Colon => "Colon", + token::ModSep => "ModSep", + token::RArrow => "RArrow", + token::LArrow => "LArrow", + token::FatArrow => "FatArrow", + token::LParen => "LParen", + token::RParen => "RParen", + token::LBracket => "LBracket", + token::RBracket => "RBracket", + token::LBrace => "LBrace", + token::RBrace => "RBrace", + token::Pound => "Pound", + token::Dollar => "Dollar", + token::Underscore => "Underscore", + token::Eof => "Eof", + _ => fail!(), }; mk_token_path(cx, sp, name) } @@ -702,7 +701,7 @@ fn expand_tts(cx: &ExtCtxt, sp: Span, tts: &[ast::TokenTree]) p.quote_depth += 1u; let cx_expr = p.parse_expr(); - if !p.eat(&token::COMMA) { + if !p.eat(&token::Comma) { p.fatal("expected token `,`"); } diff --git 
a/src/libsyntax/ext/tt/macro_parser.rs b/src/libsyntax/ext/tt/macro_parser.rs index cea8cab52654d..6d30de96a3c37 100644 --- a/src/libsyntax/ext/tt/macro_parser.rs +++ b/src/libsyntax/ext/tt/macro_parser.rs @@ -85,7 +85,7 @@ use parse::lexer::*; //resolve bug? use parse::ParseSess; use parse::attr::ParserAttr; use parse::parser::{LifetimeAndTypesWithoutColons, Parser}; -use parse::token::{Token, EOF, Nonterminal}; +use parse::token::{Token, Nonterminal}; use parse::token; use ptr::P; @@ -226,8 +226,8 @@ pub fn parse_or_else(sess: &ParseSess, /// unhygienic comparison) pub fn token_name_eq(t1 : &Token, t2 : &Token) -> bool { match (t1,t2) { - (&token::IDENT(id1,_),&token::IDENT(id2,_)) - | (&token::LIFETIME(id1),&token::LIFETIME(id2)) => + (&token::Ident(id1,_),&token::Ident(id2,_)) + | (&token::Lifetime(id1),&token::Lifetime(id2)) => id1.name == id2.name, _ => *t1 == *t2 } @@ -354,9 +354,9 @@ pub fn parse(sess: &ParseSess, // Built-in nonterminals never start with these tokens, // so we can eliminate them from consideration. match tok { - token::RPAREN | - token::RBRACE | - token::RBRACKET => {}, + token::RParen | + token::RBrace | + token::RBracket => {}, _ => bb_eis.push(ei) } } @@ -372,7 +372,7 @@ pub fn parse(sess: &ParseSess, } /* error messages here could be improved with links to orig. 
rules */ - if token_name_eq(&tok, &EOF) { + if token_name_eq(&tok, &token::Eof) { if eof_eis.len() == 1u { let mut v = Vec::new(); for dv in eof_eis.get_mut(0).matches.iter_mut() { @@ -447,7 +447,7 @@ pub fn parse_nt(p: &mut Parser, name: &str) -> Nonterminal { "ty" => token::NtTy(p.parse_ty(false /* no need to disambiguate*/)), // this could be handled like a token, since it is one "ident" => match p.token { - token::IDENT(sn,b) => { p.bump(); token::NtIdent(box sn,b) } + token::Ident(sn,b) => { p.bump(); token::NtIdent(box sn,b) } _ => { let token_str = token::to_string(&p.token); p.fatal((format!("expected ident, found {}", diff --git a/src/libsyntax/ext/tt/macro_rules.rs b/src/libsyntax/ext/tt/macro_rules.rs index 3b51fb380b816..20428e50c7f1f 100644 --- a/src/libsyntax/ext/tt/macro_rules.rs +++ b/src/libsyntax/ext/tt/macro_rules.rs @@ -20,7 +20,7 @@ use parse::lexer::new_tt_reader; use parse::parser::Parser; use parse::attr::ParserAttr; use parse::token::{special_idents, gensym_ident}; -use parse::token::{FAT_ARROW, SEMI, NtMatchers, NtTT, EOF}; +use parse::token::{NtMatchers, NtTT}; use parse::token; use print; use ptr::P; @@ -43,10 +43,10 @@ impl<'a> ParserAnyMacro<'a> { /// allowed to be there. 
fn ensure_complete_parse(&self, allow_semi: bool) { let mut parser = self.parser.borrow_mut(); - if allow_semi && parser.token == SEMI { + if allow_semi && parser.token == token::Semi { parser.bump() } - if parser.token != EOF { + if parser.token != token::Eof { let token_str = parser.this_token_to_string(); let msg = format!("macro expansion ignores token `{}` and any \ following", @@ -89,7 +89,7 @@ impl<'a> MacResult for ParserAnyMacro<'a> { loop { let mut parser = self.parser.borrow_mut(); match parser.token { - EOF => break, + token::Eof => break, _ => { let attrs = parser.parse_outer_attributes(); ret.push(parser.parse_method(attrs, ast::Inherited)) @@ -231,12 +231,13 @@ pub fn add_new_extension<'cx>(cx: &'cx mut ExtCtxt, let argument_gram = vec!( ms(MatchSeq(vec!( ms(MatchNonterminal(lhs_nm, special_idents::matchers, 0u)), - ms(MatchTok(FAT_ARROW)), - ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))), Some(SEMI), - ast::OneOrMore, 0u, 2u)), + ms(MatchTok(token::FatArrow)), + ms(MatchNonterminal(rhs_nm, special_idents::tt, 1u))), + Some(token::Semi), ast::OneOrMore, 0u, 2u)), //to phase into semicolon-termination instead of //semicolon-separation - ms(MatchSeq(vec!(ms(MatchTok(SEMI))), None, ast::ZeroOrMore, 2u, 2u))); + ms(MatchSeq(vec!(ms(MatchTok(token::Semi))), None, + ast::ZeroOrMore, 2u, 2u))); // Parse the macro_rules! 
invocation (`none` is for no interpolations): diff --git a/src/libsyntax/ext/tt/transcribe.rs b/src/libsyntax/ext/tt/transcribe.rs index 1bb519f66cd55..2c7b583d46021 100644 --- a/src/libsyntax/ext/tt/transcribe.rs +++ b/src/libsyntax/ext/tt/transcribe.rs @@ -13,7 +13,7 @@ use ast::{TokenTree, TtDelimited, TtToken, TtSequence, TtNonterminal, Ident}; use codemap::{Span, DUMMY_SP}; use diagnostic::SpanHandler; use ext::tt::macro_parser::{NamedMatch, MatchedSeq, MatchedNonterminal}; -use parse::token::{EOF, INTERPOLATED, IDENT, Token, NtIdent}; +use parse::token::{Token, NtIdent}; use parse::token; use parse::lexer::TokenAndSpan; @@ -66,7 +66,7 @@ pub fn new_tt_reader<'a>(sp_diag: &'a SpanHandler, repeat_idx: Vec::new(), repeat_len: Vec::new(), /* dummy values, never read: */ - cur_tok: EOF, + cur_tok: token::Eof, cur_span: DUMMY_SP, }; tt_next_token(&mut r); /* get cur_tok and cur_span set up */ @@ -158,7 +158,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { loop { let should_pop = match r.stack.last() { None => { - assert_eq!(ret_val.tok, EOF); + assert_eq!(ret_val.tok, token::Eof); return ret_val; } Some(frame) => { @@ -175,7 +175,7 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { let prev = r.stack.pop().unwrap(); match r.stack.last_mut() { None => { - r.cur_tok = EOF; + r.cur_tok = token::Eof; return ret_val; } Some(frame) => { @@ -272,13 +272,13 @@ pub fn tt_next_token(r: &mut TtReader) -> TokenAndSpan { (b) we actually can, since it's a token. */ MatchedNonterminal(NtIdent(box sn, b)) => { r.cur_span = sp; - r.cur_tok = IDENT(sn,b); + r.cur_tok = token::Ident(sn,b); return ret_val; } MatchedNonterminal(ref other_whole_nt) => { // FIXME(pcwalton): Bad copy. r.cur_span = sp; - r.cur_tok = INTERPOLATED((*other_whole_nt).clone()); + r.cur_tok = token::Interpolated((*other_whole_nt).clone()); return ret_val; } MatchedSeq(..) 
=> { diff --git a/src/libsyntax/fold.rs b/src/libsyntax/fold.rs index 0f9ab5c6b261e..967ad3a897cde 100644 --- a/src/libsyntax/fold.rs +++ b/src/libsyntax/fold.rs @@ -602,11 +602,11 @@ pub fn noop_fold_tts(tts: &[TokenTree], fld: &mut T) -> Vec(t: token::Token, fld: &mut T) -> token::Token { match t { - token::IDENT(id, followed_by_colons) => { - token::IDENT(fld.fold_ident(id), followed_by_colons) + token::Ident(id, followed_by_colons) => { + token::Ident(fld.fold_ident(id), followed_by_colons) } - token::LIFETIME(id) => token::LIFETIME(fld.fold_ident(id)), - token::INTERPOLATED(nt) => token::INTERPOLATED(fld.fold_interpolated(nt)), + token::Lifetime(id) => token::Lifetime(fld.fold_ident(id)), + token::Interpolated(nt) => token::Interpolated(fld.fold_interpolated(nt)), _ => t } } diff --git a/src/libsyntax/parse/attr.rs b/src/libsyntax/parse/attr.rs index 17dd546ad59d1..458a5042a7e23 100644 --- a/src/libsyntax/parse/attr.rs +++ b/src/libsyntax/parse/attr.rs @@ -14,7 +14,6 @@ use codemap::{spanned, Spanned, mk_sp, Span}; use parse::common::*; //resolve bug? use parse::token; use parse::parser::Parser; -use parse::token::INTERPOLATED; use ptr::P; /// A parser that can parse attributes. 
@@ -36,10 +35,10 @@ impl<'a> ParserAttr for Parser<'a> { debug!("parse_outer_attributes: self.token={}", self.token); match self.token { - token::POUND => { + token::Pound => { attrs.push(self.parse_attribute(false)); } - token::DOC_COMMENT(s) => { + token::DocComment(s) => { let attr = ::attr::mk_sugared_doc_attr( attr::mk_attr_id(), self.id_to_interned_str(s.ident()), @@ -66,11 +65,11 @@ impl<'a> ParserAttr for Parser<'a> { debug!("parse_attributes: permit_inner={} self.token={}", permit_inner, self.token); let (span, value, mut style) = match self.token { - token::POUND => { + token::Pound => { let lo = self.span.lo; self.bump(); - let style = if self.eat(&token::NOT) { + let style = if self.eat(&token::Not) { if !permit_inner { let span = self.span; self.span_err(span, @@ -82,10 +81,10 @@ impl<'a> ParserAttr for Parser<'a> { ast::AttrOuter }; - self.expect(&token::LBRACKET); + self.expect(&token::LBracket); let meta_item = self.parse_meta_item(); let hi = self.span.hi; - self.expect(&token::RBRACKET); + self.expect(&token::RBracket); (mk_sp(lo, hi), meta_item, style) } @@ -96,7 +95,7 @@ impl<'a> ParserAttr for Parser<'a> { } }; - if permit_inner && self.eat(&token::SEMI) { + if permit_inner && self.eat(&token::Semi) { self.span_warn(span, "this inner attribute syntax is deprecated. \ The new syntax is `#![foo]`, with a bang and no semicolon."); style = ast::AttrInner; @@ -130,10 +129,10 @@ impl<'a> ParserAttr for Parser<'a> { let mut next_outer_attrs: Vec = Vec::new(); loop { let attr = match self.token { - token::POUND => { + token::Pound => { self.parse_attribute(true) } - token::DOC_COMMENT(s) => { + token::DocComment(s) => { // we need to get the position of this token before we bump. let Span { lo, hi, .. 
} = self.span; self.bump(); @@ -161,7 +160,7 @@ impl<'a> ParserAttr for Parser<'a> { /// | IDENT meta_seq fn parse_meta_item(&mut self) -> P { let nt_meta = match self.token { - token::INTERPOLATED(token::NtMeta(ref e)) => { + token::Interpolated(token::NtMeta(ref e)) => { Some(e.clone()) } _ => None @@ -179,7 +178,7 @@ impl<'a> ParserAttr for Parser<'a> { let ident = self.parse_ident(); let name = self.id_to_interned_str(ident); match self.token { - token::EQ => { + token::Eq => { self.bump(); let lit = self.parse_lit(); // FIXME #623 Non-string meta items are not serialized correctly; @@ -195,7 +194,7 @@ impl<'a> ParserAttr for Parser<'a> { let hi = self.span.hi; P(spanned(lo, hi, ast::MetaNameValue(name, lit))) } - token::LPAREN => { + token::LParen => { let inner_items = self.parse_meta_seq(); let hi = self.span.hi; P(spanned(lo, hi, ast::MetaList(name, inner_items))) @@ -209,15 +208,15 @@ impl<'a> ParserAttr for Parser<'a> { /// matches meta_seq = ( COMMASEP(meta_item) ) fn parse_meta_seq(&mut self) -> Vec> { - self.parse_seq(&token::LPAREN, - &token::RPAREN, - seq_sep_trailing_disallowed(token::COMMA), + self.parse_seq(&token::LParen, + &token::RParen, + seq_sep_trailing_disallowed(token::Comma), |p| p.parse_meta_item()).node } fn parse_optional_meta(&mut self) -> Vec> { match self.token { - token::LPAREN => self.parse_meta_seq(), + token::LParen => self.parse_meta_seq(), _ => Vec::new() } } diff --git a/src/libsyntax/parse/lexer/mod.rs b/src/libsyntax/parse/lexer/mod.rs index 55d071b8d6005..4226c3ce3a4f9 100644 --- a/src/libsyntax/parse/lexer/mod.rs +++ b/src/libsyntax/parse/lexer/mod.rs @@ -69,7 +69,7 @@ impl<'a> Reader for StringReader<'a> { /// Return the next token. EFFECT: advances the string_reader. 
fn next_token(&mut self) -> TokenAndSpan { let ret_val = TokenAndSpan { - tok: replace(&mut self.peek_tok, token::UNDERSCORE), + tok: replace(&mut self.peek_tok, token::Underscore), sp: self.peek_span, }; self.advance_token(); @@ -92,7 +92,7 @@ impl<'a> Reader for StringReader<'a> { impl<'a> Reader for TtReader<'a> { fn is_eof(&self) -> bool { - self.cur_tok == token::EOF + self.cur_tok == token::Eof } fn next_token(&mut self) -> TokenAndSpan { let r = tt_next_token(self); @@ -136,7 +136,7 @@ impl<'a> StringReader<'a> { curr: Some('\n'), filemap: filemap, /* dummy values; not read */ - peek_tok: token::EOF, + peek_tok: token::Eof, peek_span: codemap::DUMMY_SP, read_embedded_ident: false, }; @@ -213,7 +213,7 @@ impl<'a> StringReader<'a> { }, None => { if self.is_eof() { - self.peek_tok = token::EOF; + self.peek_tok = token::Eof; } else { let start_bytepos = self.last_pos; self.peek_tok = self.next_token_inner(); @@ -396,9 +396,9 @@ impl<'a> StringReader<'a> { return self.with_str_from(start_bpos, |string| { // but comments with only more "/"s are not let tok = if is_doc_comment(string) { - token::DOC_COMMENT(token::intern(string)) + token::DocComment(token::intern(string)) } else { - token::COMMENT + token::Comment }; return Some(TokenAndSpan{ @@ -410,7 +410,7 @@ impl<'a> StringReader<'a> { let start_bpos = self.last_pos - BytePos(2); while !self.curr_is('\n') && !self.is_eof() { self.bump(); } return Some(TokenAndSpan { - tok: token::COMMENT, + tok: token::Comment, sp: codemap::mk_sp(start_bpos, self.last_pos) }); } @@ -440,7 +440,7 @@ impl<'a> StringReader<'a> { let start = self.last_pos; while !self.curr_is('\n') && !self.is_eof() { self.bump(); } return Some(TokenAndSpan { - tok: token::SHEBANG(self.name_from(start)), + tok: token::Shebang(self.name_from(start)), sp: codemap::mk_sp(start, self.last_pos) }); } @@ -466,7 +466,7 @@ impl<'a> StringReader<'a> { let start_bpos = self.last_pos; while is_whitespace(self.curr) { self.bump(); } let c = Some(TokenAndSpan { 
- tok: token::WS, + tok: token::Whitespace, sp: codemap::mk_sp(start_bpos, self.last_pos) }); debug!("scanning whitespace: {}", c); @@ -519,9 +519,9 @@ impl<'a> StringReader<'a> { self.translate_crlf(start_bpos, string, "bare CR not allowed in block doc-comment") } else { string.into_maybe_owned() }; - token::DOC_COMMENT(token::intern(string.as_slice())) + token::DocComment(token::intern(string.as_slice())) } else { - token::COMMENT + token::Comment }; Some(TokenAndSpan{ @@ -642,17 +642,17 @@ impl<'a> StringReader<'a> { } 'u' | 'i' => { self.scan_int_suffix(); - return token::LIT_INTEGER(self.name_from(start_bpos)); + return token::LitInteger(self.name_from(start_bpos)); }, 'f' => { let last_pos = self.last_pos; self.scan_float_suffix(); self.check_float_base(start_bpos, last_pos, base); - return token::LIT_FLOAT(self.name_from(start_bpos)); + return token::LitFloat(self.name_from(start_bpos)); } _ => { // just a 0 - return token::LIT_INTEGER(self.name_from(start_bpos)); + return token::LitInteger(self.name_from(start_bpos)); } } } else if c.is_digit_radix(10) { @@ -665,7 +665,7 @@ impl<'a> StringReader<'a> { self.err_span_(start_bpos, self.last_pos, "no valid digits found for number"); // eat any suffix self.scan_int_suffix(); - return token::LIT_INTEGER(token::intern("0")); + return token::LitInteger(token::intern("0")); } // might be a float, but don't be greedy if this is actually an @@ -683,13 +683,13 @@ impl<'a> StringReader<'a> { } let last_pos = self.last_pos; self.check_float_base(start_bpos, last_pos, base); - return token::LIT_FLOAT(self.name_from(start_bpos)); + return token::LitFloat(self.name_from(start_bpos)); } else if self.curr_is('f') { // or it might be an integer literal suffixed as a float self.scan_float_suffix(); let last_pos = self.last_pos; self.check_float_base(start_bpos, last_pos, base); - return token::LIT_FLOAT(self.name_from(start_bpos)); + return token::LitFloat(self.name_from(start_bpos)); } else { // it might be a float if it has 
an exponent if self.curr_is('e') || self.curr_is('E') { @@ -697,11 +697,11 @@ impl<'a> StringReader<'a> { self.scan_float_suffix(); let last_pos = self.last_pos; self.check_float_base(start_bpos, last_pos, base); - return token::LIT_FLOAT(self.name_from(start_bpos)); + return token::LitFloat(self.name_from(start_bpos)); } // but we certainly have an integer! self.scan_int_suffix(); - return token::LIT_INTEGER(self.name_from(start_bpos)); + return token::LitInteger(self.name_from(start_bpos)); } } @@ -889,13 +889,13 @@ impl<'a> StringReader<'a> { } } - fn binop(&mut self, op: token::BinOp) -> token::Token { + fn binop(&mut self, op: token::BinOpToken) -> token::Token { self.bump(); if self.curr_is('=') { self.bump(); - return token::BINOPEQ(op); + return token::BinOpEq(op); } else { - return token::BINOP(op); + return token::BinOp(op); } } @@ -919,12 +919,12 @@ impl<'a> StringReader<'a> { return self.with_str_from(start, |string| { if string == "_" { - token::UNDERSCORE + token::Underscore } else { let is_mod_name = self.curr_is(':') && self.nextch_is(':'); // FIXME: perform NFKC normalization here. (Issue #2253) - token::IDENT(str_to_ident(string), is_mod_name) + token::Ident(str_to_ident(string), is_mod_name) } }) } @@ -938,7 +938,7 @@ impl<'a> StringReader<'a> { ('\x00', Some('n'), Some('a')) => { let ast_ident = self.scan_embedded_hygienic_ident(); let is_mod_name = self.curr_is(':') && self.nextch_is(':'); - return token::IDENT(ast_ident, is_mod_name); + return token::Ident(ast_ident, is_mod_name); } _ => {} } @@ -946,84 +946,84 @@ impl<'a> StringReader<'a> { match c.expect("next_token_inner called at EOF") { // One-byte tokens. - ';' => { self.bump(); return token::SEMI; } - ',' => { self.bump(); return token::COMMA; } + ';' => { self.bump(); return token::Semi; } + ',' => { self.bump(); return token::Comma; } '.' 
=> { self.bump(); return if self.curr_is('.') { self.bump(); if self.curr_is('.') { self.bump(); - token::DOTDOTDOT + token::DotDotDot } else { - token::DOTDOT + token::DotDot } } else { - token::DOT + token::Dot }; } - '(' => { self.bump(); return token::LPAREN; } - ')' => { self.bump(); return token::RPAREN; } - '{' => { self.bump(); return token::LBRACE; } - '}' => { self.bump(); return token::RBRACE; } - '[' => { self.bump(); return token::LBRACKET; } - ']' => { self.bump(); return token::RBRACKET; } - '@' => { self.bump(); return token::AT; } - '#' => { self.bump(); return token::POUND; } - '~' => { self.bump(); return token::TILDE; } - '?' => { self.bump(); return token::QUESTION; } + '(' => { self.bump(); return token::LParen; } + ')' => { self.bump(); return token::RParen; } + '{' => { self.bump(); return token::LBrace; } + '}' => { self.bump(); return token::RBrace; } + '[' => { self.bump(); return token::LBracket; } + ']' => { self.bump(); return token::RBracket; } + '@' => { self.bump(); return token::At; } + '#' => { self.bump(); return token::Pound; } + '~' => { self.bump(); return token::Tilde; } + '?' => { self.bump(); return token::Question; } ':' => { self.bump(); if self.curr_is(':') { self.bump(); - return token::MOD_SEP; + return token::ModSep; } else { - return token::COLON; + return token::Colon; } } - '$' => { self.bump(); return token::DOLLAR; } + '$' => { self.bump(); return token::Dollar; } // Multi-byte tokens. '=' => { self.bump(); if self.curr_is('=') { self.bump(); - return token::EQEQ; + return token::EqEq; } else if self.curr_is('>') { self.bump(); - return token::FAT_ARROW; + return token::FatArrow; } else { - return token::EQ; + return token::Eq; } } '!' 
=> { self.bump(); if self.curr_is('=') { self.bump(); - return token::NE; - } else { return token::NOT; } + return token::Ne; + } else { return token::Not; } } '<' => { self.bump(); match self.curr.unwrap_or('\x00') { - '=' => { self.bump(); return token::LE; } - '<' => { return self.binop(token::SHL); } + '=' => { self.bump(); return token::Le; } + '<' => { return self.binop(token::Shl); } '-' => { self.bump(); match self.curr.unwrap_or('\x00') { - _ => { return token::LARROW; } + _ => { return token::LArrow; } } } - _ => { return token::LT; } + _ => { return token::Lt; } } } '>' => { self.bump(); match self.curr.unwrap_or('\x00') { - '=' => { self.bump(); return token::GE; } - '>' => { return self.binop(token::SHR); } - _ => { return token::GT; } + '=' => { self.bump(); return token::Ge; } + '>' => { return self.binop(token::Shr); } + _ => { return token::Gt; } } } '\'' => { @@ -1056,7 +1056,7 @@ impl<'a> StringReader<'a> { str_to_ident(lifetime_name) }); let keyword_checking_token = - &token::IDENT(keyword_checking_ident, false); + &token::Ident(keyword_checking_ident, false); let last_bpos = self.last_pos; if token::is_keyword(token::keywords::Self, keyword_checking_token) { @@ -1071,7 +1071,7 @@ impl<'a> StringReader<'a> { last_bpos, "invalid lifetime name"); } - return token::LIFETIME(ident); + return token::Lifetime(ident); } // Otherwise it is a character constant: @@ -1087,7 +1087,7 @@ impl<'a> StringReader<'a> { } let id = if valid { self.name_from(start) } else { token::intern("0") }; self.bump(); // advance curr past token - return token::LIT_CHAR(id); + return token::LitChar(id); } 'b' => { self.bump(); @@ -1095,7 +1095,7 @@ impl<'a> StringReader<'a> { Some('\'') => self.scan_byte(), Some('"') => self.scan_byte_string(), Some('r') => self.scan_raw_byte_string(), - _ => unreachable!() // Should have been a token::IDENT above. + _ => unreachable!() // Should have been a token::Ident above. 
}; } @@ -1118,7 +1118,7 @@ impl<'a> StringReader<'a> { let id = if valid { self.name_from(start_bpos + BytePos(1)) } else { token::intern("??") }; self.bump(); - return token::LIT_STR(id); + return token::LitStr(id); } 'r' => { let start_bpos = self.last_pos; @@ -1185,33 +1185,33 @@ impl<'a> StringReader<'a> { } else { token::intern("??") }; - return token::LIT_STR_RAW(id, hash_count); + return token::LitStrRaw(id, hash_count); } '-' => { if self.nextch_is('>') { self.bump(); self.bump(); - return token::RARROW; - } else { return self.binop(token::MINUS); } + return token::RArrow; + } else { return self.binop(token::Minus); } } '&' => { if self.nextch_is('&') { self.bump(); self.bump(); - return token::ANDAND; - } else { return self.binop(token::AND); } + return token::AndAnd; + } else { return self.binop(token::And); } } '|' => { match self.nextch() { - Some('|') => { self.bump(); self.bump(); return token::OROR; } - _ => { return self.binop(token::OR); } + Some('|') => { self.bump(); self.bump(); return token::OrOr; } + _ => { return self.binop(token::Or); } } } - '+' => { return self.binop(token::PLUS); } - '*' => { return self.binop(token::STAR); } - '/' => { return self.binop(token::SLASH); } - '^' => { return self.binop(token::CARET); } - '%' => { return self.binop(token::PERCENT); } + '+' => { return self.binop(token::Plus); } + '*' => { return self.binop(token::Star); } + '/' => { return self.binop(token::Slash); } + '^' => { return self.binop(token::Caret); } + '%' => { return self.binop(token::Percent); } c => { let last_bpos = self.last_pos; let bpos = self.pos; @@ -1275,7 +1275,7 @@ impl<'a> StringReader<'a> { let id = if valid { self.name_from(start) } else { token::intern("??") }; self.bump(); // advance curr past token - return token::LIT_BYTE(id); + return token::LitByte(id); } fn scan_byte_string(&mut self) -> token::Token { @@ -1297,7 +1297,7 @@ impl<'a> StringReader<'a> { } let id = if valid { self.name_from(start) } else { token::intern("??") }; 
self.bump(); - return token::LIT_BINARY(id); + return token::LitBinary(id); } fn scan_raw_byte_string(&mut self) -> token::Token { @@ -1348,7 +1348,7 @@ impl<'a> StringReader<'a> { self.bump(); } self.bump(); - return token::LIT_BINARY_RAW(self.name_from_to(content_start_bpos, content_end_bpos), + return token::LitBinaryRaw(self.name_from_to(content_start_bpos, content_end_bpos), hash_count); } } @@ -1431,20 +1431,20 @@ mod test { "/* my source file */ \ fn main() { println!(\"zebra\"); }\n".to_string()); let id = str_to_ident("fn"); - assert_eq!(string_reader.next_token().tok, token::COMMENT); - assert_eq!(string_reader.next_token().tok, token::WS); + assert_eq!(string_reader.next_token().tok, token::Comment); + assert_eq!(string_reader.next_token().tok, token::Whitespace); let tok1 = string_reader.next_token(); let tok2 = TokenAndSpan{ - tok:token::IDENT(id, false), + tok:token::Ident(id, false), sp:Span {lo:BytePos(21),hi:BytePos(23),expn_id: NO_EXPANSION}}; assert_eq!(tok1,tok2); - assert_eq!(string_reader.next_token().tok, token::WS); + assert_eq!(string_reader.next_token().tok, token::Whitespace); // the 'main' id is already read: assert_eq!(string_reader.last_pos.clone(), BytePos(28)); // read another token: let tok3 = string_reader.next_token(); let tok4 = TokenAndSpan{ - tok:token::IDENT(str_to_ident("main"), false), + tok:token::Ident(str_to_ident("main"), false), sp:Span {lo:BytePos(24),hi:BytePos(28),expn_id: NO_EXPANSION}}; assert_eq!(tok3,tok4); // the lparen is already read: @@ -1461,64 +1461,64 @@ mod test { // make the identifier by looking up the string in the interner fn mk_ident (id: &str, is_mod_name: bool) -> token::Token { - token::IDENT (str_to_ident(id),is_mod_name) + token::Ident (str_to_ident(id),is_mod_name) } #[test] fn doublecolonparsing () { check_tokenization(setup(&mk_sh(), "a b".to_string()), vec!(mk_ident("a",false), - token::WS, + token::Whitespace, mk_ident("b",false))); } #[test] fn dcparsing_2 () { 
check_tokenization(setup(&mk_sh(), "a::b".to_string()), vec!(mk_ident("a",true), - token::MOD_SEP, + token::ModSep, mk_ident("b",false))); } #[test] fn dcparsing_3 () { check_tokenization(setup(&mk_sh(), "a ::b".to_string()), vec!(mk_ident("a",false), - token::WS, - token::MOD_SEP, + token::Whitespace, + token::ModSep, mk_ident("b",false))); } #[test] fn dcparsing_4 () { check_tokenization(setup(&mk_sh(), "a:: b".to_string()), vec!(mk_ident("a",true), - token::MOD_SEP, - token::WS, + token::ModSep, + token::Whitespace, mk_ident("b",false))); } #[test] fn character_a() { assert_eq!(setup(&mk_sh(), "'a'".to_string()).next_token().tok, - token::LIT_CHAR(token::intern("a"))); + token::LitChar(token::intern("a"))); } #[test] fn character_space() { assert_eq!(setup(&mk_sh(), "' '".to_string()).next_token().tok, - token::LIT_CHAR(token::intern(" "))); + token::LitChar(token::intern(" "))); } #[test] fn character_escaped() { assert_eq!(setup(&mk_sh(), "'\\n'".to_string()).next_token().tok, - token::LIT_CHAR(token::intern("\\n"))); + token::LitChar(token::intern("\\n"))); } #[test] fn lifetime_name() { assert_eq!(setup(&mk_sh(), "'abc".to_string()).next_token().tok, - token::LIFETIME(token::str_to_ident("'abc"))); + token::Lifetime(token::str_to_ident("'abc"))); } #[test] fn raw_string() { assert_eq!(setup(&mk_sh(), "r###\"\"#a\\b\x00c\"\"###".to_string()).next_token() .tok, - token::LIT_STR_RAW(token::intern("\"#a\\b\x00c\""), 3)); + token::LitStrRaw(token::intern("\"#a\\b\x00c\""), 3)); } #[test] fn line_doc_comments() { @@ -1531,10 +1531,10 @@ mod test { let sh = mk_sh(); let mut lexer = setup(&sh, "/* /* */ */'a'".to_string()); match lexer.next_token().tok { - token::COMMENT => { }, + token::Comment => { }, _ => fail!("expected a comment!") } - assert_eq!(lexer.next_token().tok, token::LIT_CHAR(token::intern("a"))); + assert_eq!(lexer.next_token().tok, token::LitChar(token::intern("a"))); } } diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index 
2965094f23662..6c0df39daebba 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -793,34 +793,34 @@ mod test { let tts = string_to_tts("macro_rules! zip (($a)=>($a))".to_string()); let tts: &[ast::TokenTree] = tts.as_slice(); match tts { - [ast::TtToken(_, token::IDENT(name_macro_rules, false)), - ast::TtToken(_, token::NOT), - ast::TtToken(_, token::IDENT(name_zip, false)), + [ast::TtToken(_, token::Ident(name_macro_rules, false)), + ast::TtToken(_, token::Not), + ast::TtToken(_, token::Ident(name_zip, false)), ast::TtDelimited(_, ref macro_delimed)] if name_macro_rules.as_str() == "macro_rules" && name_zip.as_str() == "zip" => { let (ref macro_open, ref macro_tts, ref macro_close) = **macro_delimed; match (macro_open, macro_tts.as_slice(), macro_close) { - (&ast::Delimiter { token: token::LPAREN, .. }, + (&ast::Delimiter { token: token::LParen, .. }, [ast::TtDelimited(_, ref first_delimed), - ast::TtToken(_, token::FAT_ARROW), + ast::TtToken(_, token::FatArrow), ast::TtDelimited(_, ref second_delimed)], - &ast::Delimiter { token: token::RPAREN, .. }) => { + &ast::Delimiter { token: token::RParen, .. }) => { let (ref first_open, ref first_tts, ref first_close) = **first_delimed; match (first_open, first_tts.as_slice(), first_close) { - (&ast::Delimiter { token: token::LPAREN, .. }, - [ast::TtToken(_, token::DOLLAR), - ast::TtToken(_, token::IDENT(name, false))], - &ast::Delimiter { token: token::RPAREN, .. }) + (&ast::Delimiter { token: token::LParen, .. }, + [ast::TtToken(_, token::Dollar), + ast::TtToken(_, token::Ident(name, false))], + &ast::Delimiter { token: token::RParen, .. }) if name.as_str() == "a" => {}, _ => fail!("value 3: {}", **first_delimed), } let (ref second_open, ref second_tts, ref second_close) = **second_delimed; match (second_open, second_tts.as_slice(), second_close) { - (&ast::Delimiter { token: token::LPAREN, .. 
}, - [ast::TtToken(_, token::DOLLAR), - ast::TtToken(_, token::IDENT(name, false))], - &ast::Delimiter { token: token::RPAREN, .. }) + (&ast::Delimiter { token: token::LParen, .. }, + [ast::TtToken(_, token::Dollar), + ast::TtToken(_, token::Ident(name, false))], + &ast::Delimiter { token: token::RParen, .. }) if name.as_str() == "a" => {}, _ => fail!("value 4: {}", **second_delimed), } @@ -842,7 +842,7 @@ mod test { \"fields\":[\ null,\ {\ - \"variant\":\"IDENT\",\ + \"variant\":\"Ident\",\ \"fields\":[\ \"fn\",\ false\ @@ -855,7 +855,7 @@ mod test { \"fields\":[\ null,\ {\ - \"variant\":\"IDENT\",\ + \"variant\":\"Ident\",\ \"fields\":[\ \"a\",\ false\ @@ -870,7 +870,7 @@ mod test { [\ {\ \"span\":null,\ - \"token\":\"LPAREN\"\ + \"token\":\"LParen\"\ },\ [\ {\ @@ -878,7 +878,7 @@ mod test { \"fields\":[\ null,\ {\ - \"variant\":\"IDENT\",\ + \"variant\":\"Ident\",\ \"fields\":[\ \"b\",\ false\ @@ -890,7 +890,7 @@ mod test { \"variant\":\"TtToken\",\ \"fields\":[\ null,\ - \"COLON\"\ + \"Colon\"\ ]\ },\ {\ @@ -898,7 +898,7 @@ mod test { \"fields\":[\ null,\ {\ - \"variant\":\"IDENT\",\ + \"variant\":\"Ident\",\ \"fields\":[\ \"int\",\ false\ @@ -909,7 +909,7 @@ mod test { ],\ {\ \"span\":null,\ - \"token\":\"RPAREN\"\ + \"token\":\"RParen\"\ }\ ]\ ]\ @@ -921,7 +921,7 @@ mod test { [\ {\ \"span\":null,\ - \"token\":\"LBRACE\"\ + \"token\":\"LBrace\"\ },\ [\ {\ @@ -929,7 +929,7 @@ mod test { \"fields\":[\ null,\ {\ - \"variant\":\"IDENT\",\ + \"variant\":\"Ident\",\ \"fields\":[\ \"b\",\ false\ @@ -941,13 +941,13 @@ mod test { \"variant\":\"TtToken\",\ \"fields\":[\ null,\ - \"SEMI\"\ + \"Semi\"\ ]\ }\ ],\ {\ \"span\":null,\ - \"token\":\"RBRACE\"\ + \"token\":\"RBrace\"\ }\ ]\ ]\ @@ -1002,7 +1002,7 @@ mod test { } fn parser_done(p: Parser){ - assert_eq!(p.token.clone(), token::EOF); + assert_eq!(p.token.clone(), token::Eof); } #[test] fn parse_ident_pat () { diff --git a/src/libsyntax/parse/obsolete.rs b/src/libsyntax/parse/obsolete.rs index 
1a6fb9b85dd25..73787763c8b58 100644 --- a/src/libsyntax/parse/obsolete.rs +++ b/src/libsyntax/parse/obsolete.rs @@ -118,7 +118,7 @@ impl<'a> ParserObsoleteMethods for parser::Parser<'a> { fn is_obsolete_ident(&mut self, ident: &str) -> bool { match self.token { - token::IDENT(sid, _) => { + token::Ident(sid, _) => { token::get_ident(sid).equiv(&ident) } _ => false diff --git a/src/libsyntax/parse/parser.rs b/src/libsyntax/parse/parser.rs index 7bf751c2d5ebf..bd977962e91a1 100644 --- a/src/libsyntax/parse/parser.rs +++ b/src/libsyntax/parse/parser.rs @@ -74,7 +74,7 @@ use parse::common::{seq_sep_trailing_allowed}; use parse::lexer::Reader; use parse::lexer::TokenAndSpan; use parse::obsolete::*; -use parse::token::{INTERPOLATED, InternedString, can_begin_expr}; +use parse::token::{InternedString, can_begin_expr}; use parse::token::{is_ident, is_ident_or_path, is_plain_ident}; use parse::token::{keywords, special_idents, token_to_binop}; use parse::token; @@ -134,34 +134,33 @@ enum ItemOrViewItem { } -/// Possibly accept an `INTERPOLATED` expression (a pre-parsed expression -/// dropped into the token stream, which happens while parsing the -/// result of macro expansion) -/// Placement of these is not as complex as I feared it would be. -/// The important thing is to make sure that lookahead doesn't balk -/// at INTERPOLATED tokens +/// Possibly accept an `token::Interpolated` expression (a pre-parsed expression +/// dropped into the token stream, which happens while parsing the result of +/// macro expansion). Placement of these is not as complex as I feared it would +/// be. The important thing is to make sure that lookahead doesn't balk at +/// `token::Interpolated` tokens. macro_rules! 
maybe_whole_expr ( ($p:expr) => ( { let found = match $p.token { - INTERPOLATED(token::NtExpr(ref e)) => { + token::Interpolated(token::NtExpr(ref e)) => { Some((*e).clone()) } - INTERPOLATED(token::NtPath(_)) => { + token::Interpolated(token::NtPath(_)) => { // FIXME: The following avoids an issue with lexical borrowck scopes, // but the clone is unfortunate. let pt = match $p.token { - INTERPOLATED(token::NtPath(ref pt)) => (**pt).clone(), + token::Interpolated(token::NtPath(ref pt)) => (**pt).clone(), _ => unreachable!() }; let span = $p.span; Some($p.mk_expr(span.lo, span.hi, ExprPath(pt))) } - INTERPOLATED(token::NtBlock(_)) => { + token::Interpolated(token::NtBlock(_)) => { // FIXME: The following avoids an issue with lexical borrowck scopes, // but the clone is unfortunate. let b = match $p.token { - INTERPOLATED(token::NtBlock(ref b)) => (*b).clone(), + token::Interpolated(token::NtBlock(ref b)) => (*b).clone(), _ => unreachable!() }; let span = $p.span; @@ -185,13 +184,13 @@ macro_rules! maybe_whole ( ($p:expr, $constructor:ident) => ( { let found = match ($p).token { - INTERPOLATED(token::$constructor(_)) => { + token::Interpolated(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { - Some(INTERPOLATED(token::$constructor(x))) => { + Some(token::Interpolated(token::$constructor(x))) => { return x.clone() } _ => {} @@ -201,13 +200,13 @@ macro_rules! maybe_whole ( (no_clone $p:expr, $constructor:ident) => ( { let found = match ($p).token { - INTERPOLATED(token::$constructor(_)) => { + token::Interpolated(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { - Some(INTERPOLATED(token::$constructor(x))) => { + Some(token::Interpolated(token::$constructor(x))) => { return x } _ => {} @@ -217,13 +216,13 @@ macro_rules! 
maybe_whole ( (deref $p:expr, $constructor:ident) => ( { let found = match ($p).token { - INTERPOLATED(token::$constructor(_)) => { + token::Interpolated(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { - Some(INTERPOLATED(token::$constructor(x))) => { + Some(token::Interpolated(token::$constructor(x))) => { return (*x).clone() } _ => {} @@ -233,13 +232,13 @@ macro_rules! maybe_whole ( (Some $p:expr, $constructor:ident) => ( { let found = match ($p).token { - INTERPOLATED(token::$constructor(_)) => { + token::Interpolated(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { - Some(INTERPOLATED(token::$constructor(x))) => { + Some(token::Interpolated(token::$constructor(x))) => { return Some(x.clone()), } _ => {} @@ -249,13 +248,13 @@ macro_rules! maybe_whole ( (iovi $p:expr, $constructor:ident) => ( { let found = match ($p).token { - INTERPOLATED(token::$constructor(_)) => { + token::Interpolated(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { - Some(INTERPOLATED(token::$constructor(x))) => { + Some(token::Interpolated(token::$constructor(x))) => { return IoviItem(x.clone()) } _ => {} @@ -265,13 +264,13 @@ macro_rules! 
maybe_whole ( (pair_empty $p:expr, $constructor:ident) => ( { let found = match ($p).token { - INTERPOLATED(token::$constructor(_)) => { + token::Interpolated(token::$constructor(_)) => { Some(($p).bump_and_get()) } _ => None }; match found { - Some(INTERPOLATED(token::$constructor(x))) => { + Some(token::Interpolated(token::$constructor(x))) => { return (Vec::new(), x) } _ => {} @@ -336,7 +335,7 @@ pub struct Parser<'a> { } fn is_plain_ident_or_underscore(t: &token::Token) -> bool { - is_plain_ident(t) || *t == token::UNDERSCORE + is_plain_ident(t) || *t == token::Underscore } /// Get a token the parser cares about @@ -344,7 +343,7 @@ fn real_token(rdr: &mut Reader) -> TokenAndSpan { let mut t = rdr.next_token(); loop { match t.tok { - token::WS | token::COMMENT | token::SHEBANG(_) => { + token::Whitespace | token::Comment | token::Shebang(_) => { t = rdr.next_token(); }, _ => break @@ -362,7 +361,7 @@ impl<'a> Parser<'a> { let tok0 = real_token(&mut *rdr); let span = tok0.sp; let placeholder = TokenAndSpan { - tok: token::UNDERSCORE, + tok: token::Underscore, sp: span, }; @@ -475,15 +474,15 @@ impl<'a> Parser<'a> { /// recover (without consuming any expected input token). Returns /// true if and only if input was consumed for recovery. pub fn check_for_erroneous_unit_struct_expecting(&mut self, expected: &[token::Token]) -> bool { - if self.token == token::LBRACE - && expected.iter().all(|t| *t != token::LBRACE) - && self.look_ahead(1, |t| *t == token::RBRACE) { + if self.token == token::LBrace + && expected.iter().all(|t| *t != token::LBrace) + && self.look_ahead(1, |t| *t == token::RBrace) { // matched; signal non-fatal error and recover. 
let span = self.span; self.span_err(span, "unit-like struct construction is written with no trailing `{ }`"); - self.eat(&token::LBRACE); - self.eat(&token::RBRACE); + self.eat(&token::LBrace); + self.eat(&token::RBrace); true } else { false @@ -535,11 +534,11 @@ impl<'a> Parser<'a> { self.check_strict_keywords(); self.check_reserved_keywords(); match self.token { - token::IDENT(i, _) => { + token::Ident(i, _) => { self.bump(); i } - token::INTERPOLATED(token::NtIdent(..)) => { + token::Interpolated(token::NtIdent(..)) => { self.bug("ident interpolation not converted to real token"); } _ => { @@ -621,16 +620,16 @@ impl<'a> Parser<'a> { /// `&` and continue. If an `&` is not seen, signal an error. fn expect_and(&mut self) { match self.token { - token::BINOP(token::AND) => self.bump(), - token::ANDAND => { + token::BinOp(token::And) => self.bump(), + token::AndAnd => { let span = self.span; let lo = span.lo + BytePos(1); - self.replace_token(token::BINOP(token::AND), lo, span.hi) + self.replace_token(token::BinOp(token::And), lo, span.hi) } _ => { let token_str = self.this_token_to_string(); let found_token = - Parser::token_to_string(&token::BINOP(token::AND)); + Parser::token_to_string(&token::BinOp(token::And)); self.fatal(format!("expected `{}`, found `{}`", found_token, token_str).as_slice()) @@ -642,16 +641,16 @@ impl<'a> Parser<'a> { /// `|` and continue. If a `|` is not seen, signal an error. 
fn expect_or(&mut self) { match self.token { - token::BINOP(token::OR) => self.bump(), - token::OROR => { + token::BinOp(token::Or) => self.bump(), + token::OrOr => { let span = self.span; let lo = span.lo + BytePos(1); - self.replace_token(token::BINOP(token::OR), lo, span.hi) + self.replace_token(token::BinOp(token::Or), lo, span.hi) } _ => { let found_token = self.this_token_to_string(); let token_str = - Parser::token_to_string(&token::BINOP(token::OR)); + Parser::token_to_string(&token::BinOp(token::Or)); self.fatal(format!("expected `{}`, found `{}`", token_str, found_token).as_slice()) @@ -681,16 +680,16 @@ impl<'a> Parser<'a> { /// impl Foo<<'a> ||>() { ... } fn eat_lt(&mut self, force: bool) -> bool { match self.token { - token::LT => { self.bump(); true } - token::BINOP(token::SHL) => { + token::Lt => { self.bump(); true } + token::BinOp(token::Shl) => { let next_lifetime = self.look_ahead(1, |t| match *t { - token::LIFETIME(..) => true, + token::Lifetime(..) => true, _ => false, }); if force || next_lifetime { let span = self.span; let lo = span.lo + BytePos(1); - self.replace_token(token::LT, lo, span.hi); + self.replace_token(token::Lt, lo, span.hi); true } else { false @@ -703,7 +702,7 @@ impl<'a> Parser<'a> { fn expect_lt(&mut self) { if !self.eat_lt(true) { let found_token = self.this_token_to_string(); - let token_str = Parser::token_to_string(&token::LT); + let token_str = Parser::token_to_string(&token::Lt); self.fatal(format!("expected `{}`, found `{}`", token_str, found_token).as_slice()) @@ -718,8 +717,8 @@ impl<'a> Parser<'a> { -> Vec { let mut first = true; let mut vector = Vec::new(); - while self.token != token::BINOP(token::OR) && - self.token != token::OROR { + while self.token != token::BinOp(token::Or) && + self.token != token::OrOr { if first { first = false } else { @@ -736,24 +735,24 @@ impl<'a> Parser<'a> { /// signal an error. 
pub fn expect_gt(&mut self) { match self.token { - token::GT => self.bump(), - token::BINOP(token::SHR) => { + token::Gt => self.bump(), + token::BinOp(token::Shr) => { let span = self.span; let lo = span.lo + BytePos(1); - self.replace_token(token::GT, lo, span.hi) + self.replace_token(token::Gt, lo, span.hi) } - token::BINOPEQ(token::SHR) => { + token::BinOpEq(token::Shr) => { let span = self.span; let lo = span.lo + BytePos(1); - self.replace_token(token::GE, lo, span.hi) + self.replace_token(token::Ge, lo, span.hi) } - token::GE => { + token::Ge => { let span = self.span; let lo = span.lo + BytePos(1); - self.replace_token(token::EQ, lo, span.hi) + self.replace_token(token::Eq, lo, span.hi) } _ => { - let gt_str = Parser::token_to_string(&token::GT); + let gt_str = Parser::token_to_string(&token::Gt); let this_token_str = self.this_token_to_string(); self.fatal(format!("expected `{}`, found `{}`", gt_str, @@ -777,10 +776,10 @@ impl<'a> Parser<'a> { // commas in generic parameters, because it can stop either after // parsing a type or after parsing a comma. for i in iter::count(0u, 1) { - if self.token == token::GT - || self.token == token::BINOP(token::SHR) - || self.token == token::GE - || self.token == token::BINOPEQ(token::SHR) { + if self.token == token::Gt + || self.token == token::BinOp(token::Shr) + || self.token == token::Ge + || self.token == token::BinOpEq(token::Shr) { break; } @@ -911,7 +910,7 @@ impl<'a> Parser<'a> { self.buffer_start = next_index as int; let placeholder = TokenAndSpan { - tok: token::UNDERSCORE, + tok: token::Underscore, sp: self.span, }; replace(&mut self.buffer[buffer_start], placeholder) @@ -923,7 +922,7 @@ impl<'a> Parser<'a> { /// Advance the parser by one token and return the bumped token. 
pub fn bump_and_get(&mut self) -> token::Token { - let old_token = replace(&mut self.token, token::UNDERSCORE); + let old_token = replace(&mut self.token, token::Underscore); self.bump(); old_token } @@ -1015,14 +1014,14 @@ impl<'a> Parser<'a> { pub fn token_is_lifetime(tok: &token::Token) -> bool { match *tok { - token::LIFETIME(..) => true, + token::Lifetime(..) => true, _ => false, } } pub fn get_lifetime(&mut self) -> ast::Ident { match self.token { - token::LIFETIME(ref ident) => *ident, + token::Lifetime(ref ident) => *ident, _ => self.bug("not a lifetime"), } } @@ -1074,7 +1073,7 @@ impl<'a> Parser<'a> { */ - let lifetime_defs = if self.eat(&token::LT) { + let lifetime_defs = if self.eat(&token::Lt) { let lifetime_defs = self.parse_lifetime_defs(); self.expect_gt(); lifetime_defs @@ -1103,25 +1102,25 @@ impl<'a> Parser<'a> { /// Parses an optional unboxed closure kind (`&:`, `&mut:`, or `:`). pub fn parse_optional_unboxed_closure_kind(&mut self) -> Option { - if self.token == token::BINOP(token::AND) && + if self.token == token::BinOp(token::And) && self.look_ahead(1, |t| { token::is_keyword(keywords::Mut, t) }) && - self.look_ahead(2, |t| *t == token::COLON) { + self.look_ahead(2, |t| *t == token::Colon) { self.bump(); self.bump(); self.bump(); return Some(FnMutUnboxedClosureKind) } - if self.token == token::BINOP(token::AND) && - self.look_ahead(1, |t| *t == token::COLON) { + if self.token == token::BinOp(token::And) && + self.look_ahead(1, |t| *t == token::Colon) { self.bump(); self.bump(); return Some(FnUnboxedClosureKind) } - if self.eat(&token::COLON) { + if self.eat(&token::Colon) { return Some(FnOnceUnboxedClosureKind) } @@ -1147,7 +1146,7 @@ impl<'a> Parser<'a> { let fn_style = self.parse_unsafety(); let onceness = if self.eat_keyword(keywords::Once) {Once} else {Many}; - let lifetime_defs = if self.eat(&token::LT) { + let lifetime_defs = if self.eat(&token::Lt) { let lifetime_defs = self.parse_lifetime_defs(); self.expect_gt(); @@ -1156,7 +1155,7 
@@ impl<'a> Parser<'a> { Vec::new() }; - let (optional_unboxed_closure_kind, inputs) = if self.eat(&token::OROR) { + let (optional_unboxed_closure_kind, inputs) = if self.eat(&token::OrOr) { (None, Vec::new()) } else { self.expect_or(); @@ -1165,7 +1164,7 @@ impl<'a> Parser<'a> { self.parse_optional_unboxed_closure_kind(); let inputs = self.parse_seq_to_before_or( - &token::COMMA, + &token::Comma, |p| p.parse_arg_general(false)); self.expect_or(); (optional_unboxed_closure_kind, inputs) @@ -1221,7 +1220,7 @@ impl<'a> Parser<'a> { Lifetime_defs */ - let lifetime_defs = if self.eat(&token::LT) { + let lifetime_defs = if self.eat(&token::Lt) { let lifetime_defs = self.parse_lifetime_defs(); self.expect_gt(); lifetime_defs @@ -1247,7 +1246,7 @@ impl<'a> Parser<'a> { let lo = self.span.lo; let ident = self.parse_ident(); let hi = self.span.hi; - self.expect(&token::SEMI); + self.expect(&token::Semi); AssociatedType { id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), @@ -1262,10 +1261,10 @@ impl<'a> Parser<'a> { -> Typedef { let lo = self.span.lo; let ident = self.parse_ident(); - self.expect(&token::EQ); + self.expect(&token::Eq); let typ = self.parse_ty(true); let hi = self.span.hi; - self.expect(&token::SEMI); + self.expect(&token::Semi); Typedef { id: ast::DUMMY_NODE_ID, span: mk_sp(lo, hi), @@ -1279,8 +1278,8 @@ impl<'a> Parser<'a> { /// Parse the items in a trait declaration pub fn parse_trait_items(&mut self) -> Vec { self.parse_unspanned_seq( - &token::LBRACE, - &token::RBRACE, + &token::LBrace, + &token::RBrace, seq_sep_none(), |p| { let attrs = p.parse_outer_attributes(); @@ -1317,7 +1316,7 @@ impl<'a> Parser<'a> { let hi = p.last_span.hi; match p.token { - token::SEMI => { + token::Semi => { p.bump(); debug!("parse_trait_methods(): parsing required method"); RequiredMethod(TypeMethod { @@ -1333,7 +1332,7 @@ impl<'a> Parser<'a> { vis: vis, }) } - token::LBRACE => { + token::LBrace => { debug!("parse_trait_methods(): parsing provided method"); let (inner_attrs, body) 
= p.parse_inner_attrs_and_block(); @@ -1377,7 +1376,7 @@ impl<'a> Parser<'a> { let lo = self.span.lo; let mutbl = self.parse_mutability(); let id = self.parse_ident(); - self.expect(&token::COLON); + self.expect(&token::Colon); let ty = self.parse_ty(true); let hi = ty.span.hi; ast::TypeField { @@ -1389,9 +1388,9 @@ impl<'a> Parser<'a> { /// Parse optional return type [ -> TY ] in function decl pub fn parse_ret_ty(&mut self) -> (RetStyle, P) { - return if self.eat(&token::RARROW) { + return if self.eat(&token::RArrow) { let lo = self.span.lo; - if self.eat(&token::NOT) { + if self.eat(&token::Not) { ( NoReturn, P(Ty { @@ -1425,9 +1424,9 @@ impl<'a> Parser<'a> { let lo = self.span.lo; - let t = if self.token == token::LPAREN { + let t = if self.token == token::LParen { self.bump(); - if self.token == token::RPAREN { + if self.token == token::RParen { self.bump(); TyNil } else { @@ -1436,9 +1435,9 @@ impl<'a> Parser<'a> { // of type t let mut ts = vec!(self.parse_ty(true)); let mut one_tuple = false; - while self.token == token::COMMA { + while self.token == token::Comma { self.bump(); - if self.token != token::RPAREN { + if self.token != token::RParen { ts.push(self.parse_ty(true)); } else { @@ -1447,30 +1446,30 @@ impl<'a> Parser<'a> { } if ts.len() == 1 && !one_tuple { - self.expect(&token::RPAREN); + self.expect(&token::RParen); TyParen(ts.into_iter().nth(0).unwrap()) } else { let t = TyTup(ts); - self.expect(&token::RPAREN); + self.expect(&token::RParen); t } } - } else if self.token == token::TILDE { + } else if self.token == token::Tilde { // OWNED POINTER self.bump(); let last_span = self.last_span; match self.token { - token::LBRACKET => self.obsolete(last_span, ObsoleteOwnedVector), + token::LBracket => self.obsolete(last_span, ObsoleteOwnedVector), _ => self.obsolete(last_span, ObsoleteOwnedType) } TyUniq(self.parse_ty(false)) - } else if self.token == token::BINOP(token::STAR) { + } else if self.token == token::BinOp(token::Star) { // STAR POINTER (bare 
pointer?) self.bump(); TyPtr(self.parse_ptr()) - } else if self.token == token::LBRACKET { + } else if self.token == token::LBracket { // VECTOR - self.expect(&token::LBRACKET); + self.expect(&token::LBracket); let t = self.parse_ty(true); // Parse the `, ..e` in `[ int, ..e ]` @@ -1479,10 +1478,10 @@ impl<'a> Parser<'a> { None => TyVec(t), Some(suffix) => TyFixedLengthVec(t, suffix) }; - self.expect(&token::RBRACKET); + self.expect(&token::RBracket); t - } else if self.token == token::BINOP(token::AND) || - self.token == token::ANDAND { + } else if self.token == token::BinOp(token::And) || + self.token == token::AndAnd { // BORROWED POINTER self.expect_and(); self.parse_borrowed_pointee() @@ -1492,11 +1491,11 @@ impl<'a> Parser<'a> { // BARE FUNCTION self.parse_ty_bare_fn() } else if self.token_is_closure_keyword() || - self.token == token::BINOP(token::OR) || - self.token == token::OROR || - (self.token == token::LT && + self.token == token::BinOp(token::Or) || + self.token == token::OrOr || + (self.token == token::Lt && self.look_ahead(1, |t| { - *t == token::GT || Parser::token_is_lifetime(t) + *t == token::Gt || Parser::token_is_lifetime(t) })) { // CLOSURE @@ -1504,27 +1503,27 @@ impl<'a> Parser<'a> { } else if self.eat_keyword(keywords::Typeof) { // TYPEOF // In order to not be ambiguous, the type must be surrounded by parens. 
- self.expect(&token::LPAREN); + self.expect(&token::LParen); let e = self.parse_expr(); - self.expect(&token::RPAREN); + self.expect(&token::RParen); TyTypeof(e) } else if self.eat_keyword(keywords::Proc) { self.parse_proc_type() - } else if self.token == token::LT { + } else if self.token == token::Lt { // QUALIFIED PATH self.bump(); let for_type = self.parse_ty(true); self.expect_keyword(keywords::As); let trait_name = self.parse_path(LifetimeAndTypesWithoutColons); - self.expect(&token::GT); - self.expect(&token::MOD_SEP); + self.expect(&token::Gt); + self.expect(&token::ModSep); let item_name = self.parse_ident(); TyQPath(P(QPath { for_type: for_type, trait_name: trait_name.path, item_name: item_name, })) - } else if self.token == token::MOD_SEP + } else if self.token == token::ModSep || is_ident_or_path(&self.token) { // NAMED TYPE let mode = if plus_allowed { @@ -1537,7 +1536,7 @@ impl<'a> Parser<'a> { bounds } = self.parse_path(mode); TyPath(path, bounds, ast::DUMMY_NODE_ID) - } else if self.eat(&token::UNDERSCORE) { + } else if self.eat(&token::Underscore) { // TYPE TO BE INFERRED TyInfer } else { @@ -1576,8 +1575,8 @@ impl<'a> Parser<'a> { pub fn is_named_argument(&mut self) -> bool { let offset = match self.token { - token::BINOP(token::AND) => 1, - token::ANDAND => 1, + token::BinOp(token::And) => 1, + token::AndAnd => 1, _ if token::is_keyword(keywords::Mut, &self.token) => 1, _ => 0 }; @@ -1586,10 +1585,10 @@ impl<'a> Parser<'a> { if offset == 0 { is_plain_ident_or_underscore(&self.token) - && self.look_ahead(1, |t| *t == token::COLON) + && self.look_ahead(1, |t| *t == token::Colon) } else { self.look_ahead(offset, |t| is_plain_ident_or_underscore(t)) - && self.look_ahead(offset + 1, |t| *t == token::COLON) + && self.look_ahead(offset + 1, |t| *t == token::Colon) } } @@ -1601,7 +1600,7 @@ impl<'a> Parser<'a> { require_name); let pat = self.parse_pat(); - self.expect(&token::COLON); + self.expect(&token::Colon); pat } else { debug!("parse_arg_general 
ident_to_pat"); @@ -1627,7 +1626,7 @@ impl<'a> Parser<'a> { /// Parse an argument in a lambda header e.g. |arg, arg| pub fn parse_fn_block_arg(&mut self) -> Arg { let pat = self.parse_pat(); - let t = if self.eat(&token::COLON) { + let t = if self.eat(&token::Colon) { self.parse_ty(true) } else { P(Ty { @@ -1644,8 +1643,8 @@ impl<'a> Parser<'a> { } pub fn maybe_parse_fixed_vstore(&mut self) -> Option> { - if self.token == token::COMMA && - self.look_ahead(1, |t| *t == token::DOTDOT) { + if self.token == token::Comma && + self.look_ahead(1, |t| *t == token::DotDot) { self.bump(); self.bump(); Some(self.parse_expr()) @@ -1657,24 +1656,24 @@ impl<'a> Parser<'a> { /// Matches token_lit = LIT_INTEGER | ... pub fn lit_from_token(&mut self, tok: &token::Token) -> Lit_ { match *tok { - token::LIT_BYTE(i) => LitByte(parse::byte_lit(i.as_str()).val0()), - token::LIT_CHAR(i) => LitChar(parse::char_lit(i.as_str()).val0()), - token::LIT_INTEGER(s) => parse::integer_lit(s.as_str(), + token::LitByte(i) => LitByte(parse::byte_lit(i.as_str()).val0()), + token::LitChar(i) => LitChar(parse::char_lit(i.as_str()).val0()), + token::LitInteger(s) => parse::integer_lit(s.as_str(), &self.sess.span_diagnostic, self.span), - token::LIT_FLOAT(s) => parse::float_lit(s.as_str()), - token::LIT_STR(s) => { + token::LitFloat(s) => parse::float_lit(s.as_str()), + token::LitStr(s) => { LitStr(token::intern_and_get_ident(parse::str_lit(s.as_str()).as_slice()), ast::CookedStr) } - token::LIT_STR_RAW(s, n) => { + token::LitStrRaw(s, n) => { LitStr(token::intern_and_get_ident(parse::raw_str_lit(s.as_str()).as_slice()), ast::RawStr(n)) } - token::LIT_BINARY(i) => + token::LitBinary(i) => LitBinary(parse::binary_lit(i.as_str())), - token::LIT_BINARY_RAW(i, _) => + token::LitBinaryRaw(i, _) => LitBinary(Rc::new(i.as_str().as_bytes().iter().map(|&x| x).collect())), - token::LPAREN => { self.expect(&token::RPAREN); LitNil }, + token::LParen => { self.expect(&token::RParen); LitNil }, _ => { 
self.unexpected_last(tok); } } } @@ -1697,7 +1696,7 @@ impl<'a> Parser<'a> { /// matches '-' lit | lit pub fn parse_literal_maybe_minus(&mut self) -> P { let minus_lo = self.span.lo; - let minus_present = self.eat(&token::BINOP(token::MINUS)); + let minus_present = self.eat(&token::BinOp(token::Minus)); let lo = self.span.lo; let literal = P(self.parse_lit()); @@ -1720,11 +1719,11 @@ impl<'a> Parser<'a> { pub fn parse_path(&mut self, mode: PathParsingMode) -> PathAndBounds { // Check for a whole path... let found = match self.token { - INTERPOLATED(token::NtPath(_)) => Some(self.bump_and_get()), + token::Interpolated(token::NtPath(_)) => Some(self.bump_and_get()), _ => None, }; match found { - Some(INTERPOLATED(token::NtPath(box path))) => { + Some(token::Interpolated(token::NtPath(box path))) => { return PathAndBounds { path: path, bounds: None @@ -1734,7 +1733,7 @@ impl<'a> Parser<'a> { } let lo = self.span.lo; - let is_global = self.eat(&token::MOD_SEP); + let is_global = self.eat(&token::ModSep); // Parse any number of segments and bound sets. A segment is an // identifier followed by an optional lifetime and a set of types. @@ -1747,7 +1746,7 @@ impl<'a> Parser<'a> { // Parse the '::' before type parameters if it's required. If // it is required and wasn't present, then we're done. if mode == LifetimeAndTypesWithColons && - !self.eat(&token::MOD_SEP) { + !self.eat(&token::ModSep) { segments.push(ast::PathSegment { identifier: identifier, lifetimes: Vec::new(), @@ -1778,7 +1777,7 @@ impl<'a> Parser<'a> { // a double colon to get here in the first place. if !(mode == LifetimeAndTypesWithColons && !any_lifetime_or_types) { - if !self.eat(&token::MOD_SEP) { + if !self.eat(&token::ModSep) { break } } @@ -1790,7 +1789,7 @@ impl<'a> Parser<'a> { // error. 
let opt_bounds = { if mode == LifetimeAndTypesAndBounds && - self.eat(&token::BINOP(token::PLUS)) + self.eat(&token::BinOp(token::Plus)) { let bounds = self.parse_ty_param_bounds(); @@ -1828,7 +1827,7 @@ impl<'a> Parser<'a> { /// parses 0 or 1 lifetime pub fn parse_opt_lifetime(&mut self) -> Option { match self.token { - token::LIFETIME(..) => { + token::Lifetime(..) => { Some(self.parse_lifetime()) } _ => { @@ -1841,7 +1840,7 @@ impl<'a> Parser<'a> { /// Matches lifetime = LIFETIME pub fn parse_lifetime(&mut self) -> ast::Lifetime { match self.token { - token::LIFETIME(i) => { + token::Lifetime(i) => { let span = self.span; self.bump(); return ast::Lifetime { @@ -1865,11 +1864,11 @@ impl<'a> Parser<'a> { let mut res = Vec::new(); loop { match self.token { - token::LIFETIME(_) => { + token::Lifetime(_) => { let lifetime = self.parse_lifetime(); let bounds = - if self.eat(&token::COLON) { - self.parse_lifetimes(token::BINOP(token::PLUS)) + if self.eat(&token::Colon) { + self.parse_lifetimes(token::BinOp(token::Plus)) } else { Vec::new() }; @@ -1883,9 +1882,9 @@ impl<'a> Parser<'a> { } match self.token { - token::COMMA => { self.bump(); } - token::GT => { return res; } - token::BINOP(token::SHR) => { return res; } + token::Comma => { self.bump(); } + token::Gt => { return res; } + token::BinOp(token::Shr) => { return res; } _ => { let msg = format!("expected `,` or `>` after lifetime \ name, got: {}", @@ -1910,7 +1909,7 @@ impl<'a> Parser<'a> { let mut res = Vec::new(); loop { match self.token { - token::LIFETIME(_) => { + token::Lifetime(_) => { res.push(self.parse_lifetime()); } _ => { @@ -1945,7 +1944,7 @@ impl<'a> Parser<'a> { let lo = self.span.lo; let i = self.parse_ident(); let hi = self.last_span.hi; - self.expect(&token::COLON); + self.expect(&token::Colon); let e = self.parse_expr(); ast::Field { ident: spanned(lo, hi, i), @@ -2043,31 +2042,31 @@ impl<'a> Parser<'a> { let ex: Expr_; match self.token { - token::LPAREN => { + token::LParen => { self.bump(); 
// (e) is parenthesized e // (e,) is a tuple with only one field, e let mut trailing_comma = false; - if self.token == token::RPAREN { + if self.token == token::RParen { hi = self.span.hi; self.bump(); let lit = P(spanned(lo, hi, LitNil)); return self.mk_expr(lo, hi, ExprLit(lit)); } let mut es = vec!(self.parse_expr()); - self.commit_expr(&**es.last().unwrap(), &[], &[token::COMMA, token::RPAREN]); - while self.token == token::COMMA { + self.commit_expr(&**es.last().unwrap(), &[], &[token::Comma, token::RParen]); + while self.token == token::Comma { self.bump(); - if self.token != token::RPAREN { + if self.token != token::RParen { es.push(self.parse_expr()); self.commit_expr(&**es.last().unwrap(), &[], - &[token::COMMA, token::RPAREN]); + &[token::Comma, token::RParen]); } else { trailing_comma = true; } } hi = self.span.hi; - self.commit_expr_expecting(&**es.last().unwrap(), token::RPAREN); + self.commit_expr_expecting(&**es.last().unwrap(), token::RParen); return if es.len() == 1 && !trailing_comma { self.mk_expr(lo, hi, ExprParen(es.into_iter().nth(0).unwrap())) @@ -2075,18 +2074,18 @@ impl<'a> Parser<'a> { self.mk_expr(lo, hi, ExprTup(es)) } }, - token::LBRACE => { + token::LBrace => { self.bump(); let blk = self.parse_block_tail(lo, DefaultBlock); return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)); }, - token::BINOP(token::OR) | token::OROR => { + token::BinOp(token::Or) | token::OrOr => { return self.parse_lambda_expr(CaptureByRef); }, // FIXME #13626: Should be able to stick in // token::SELF_KEYWORD_NAME - token::IDENT(id @ ast::Ident{ + token::Ident(id @ ast::Ident{ name: ast::Name(token::SELF_KEYWORD_NAME_NUM), ctxt: _ } ,false) => { @@ -2095,30 +2094,30 @@ impl<'a> Parser<'a> { ex = ExprPath(path); hi = self.last_span.hi; } - token::LBRACKET => { + token::LBracket => { self.bump(); - if self.token == token::RBRACKET { + if self.token == token::RBracket { // Empty vector. self.bump(); ex = ExprVec(Vec::new()); } else { // Nonempty vector. 
let first_expr = self.parse_expr(); - if self.token == token::COMMA && - self.look_ahead(1, |t| *t == token::DOTDOT) { + if self.token == token::Comma && + self.look_ahead(1, |t| *t == token::DotDot) { // Repeating vector syntax: [ 0, ..512 ] self.bump(); self.bump(); let count = self.parse_expr(); - self.expect(&token::RBRACKET); + self.expect(&token::RBracket); ex = ExprRepeat(first_expr, count); - } else if self.token == token::COMMA { + } else if self.token == token::Comma { // Vector with two or more elements. self.bump(); let remaining_exprs = self.parse_seq_to_end( - &token::RBRACKET, - seq_sep_trailing_allowed(token::COMMA), + &token::RBracket, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_expr() ); let mut exprs = vec!(first_expr); @@ -2126,7 +2125,7 @@ impl<'a> Parser<'a> { ex = ExprVec(exprs); } else { // Vector with one element. - self.expect(&token::RBRACKET); + self.expect(&token::RBracket); ex = ExprVec(vec!(first_expr)); } } @@ -2161,7 +2160,7 @@ impl<'a> Parser<'a> { if Parser::token_is_lifetime(&self.token) { let lifetime = self.get_lifetime(); self.bump(); - self.expect(&token::COLON); + self.expect(&token::Colon); if self.eat_keyword(keywords::While) { return self.parse_while_expr(Some(lifetime)) } @@ -2215,7 +2214,7 @@ impl<'a> Parser<'a> { ex = ExprBreak(None); } hi = self.span.hi; - } else if self.token == token::MOD_SEP || + } else if self.token == token::ModSep || is_ident(&self.token) && !self.is_keyword(keywords::True) && !self.is_keyword(keywords::False) { @@ -2223,7 +2222,7 @@ impl<'a> Parser<'a> { self.parse_path(LifetimeAndTypesWithColons).path; // `!`, as an operator, is prefix, so we know this isn't that - if self.token == token::NOT { + if self.token == token::Not { // MACRO INVOCATION expression self.bump(); @@ -2245,7 +2244,7 @@ impl<'a> Parser<'a> { tts, EMPTY_CTXT)); } - if self.token == token::LBRACE { + if self.token == token::LBrace { // This is a struct literal, unless we're prohibited // from parsing struct 
literals here. if !self.restrictions.contains(RESTRICTION_NO_STRUCT_LITERAL) { @@ -2254,16 +2253,16 @@ impl<'a> Parser<'a> { let mut fields = Vec::new(); let mut base = None; - while self.token != token::RBRACE { - if self.eat(&token::DOTDOT) { + while self.token != token::RBrace { + if self.eat(&token::DotDot) { base = Some(self.parse_expr()); break; } fields.push(self.parse_field()); self.commit_expr(&*fields.last().unwrap().expr, - &[token::COMMA], - &[token::RBRACE]); + &[token::Comma], + &[token::RBrace]); } if fields.len() == 0 && base.is_none() { @@ -2276,7 +2275,7 @@ impl<'a> Parser<'a> { } hi = self.span.hi; - self.expect(&token::RBRACE); + self.expect(&token::RBrace); ex = ExprStruct(pth, fields, base); return self.mk_expr(lo, hi, ex); } @@ -2299,7 +2298,7 @@ impl<'a> Parser<'a> { /// Parse a block or unsafe block pub fn parse_block_expr(&mut self, lo: BytePos, blk_mode: BlockCheckMode) -> P { - self.expect(&token::LBRACE); + self.expect(&token::LBrace); let blk = self.parse_block_tail(lo, blk_mode); return self.mk_expr(blk.span.lo, blk.span.hi, ExprBlock(blk)); } @@ -2316,13 +2315,13 @@ impl<'a> Parser<'a> { let mut hi; loop { // expr.f - if self.eat(&token::DOT) { + if self.eat(&token::Dot) { match self.token { - token::IDENT(i, _) => { + token::Ident(i, _) => { let dot = self.last_span.hi; hi = self.span.hi; self.bump(); - let (_, tys) = if self.eat(&token::MOD_SEP) { + let (_, tys) = if self.eat(&token::ModSep) { self.expect_lt(); self.parse_generic_values_after_lt() } else { @@ -2331,11 +2330,11 @@ impl<'a> Parser<'a> { // expr.f() method call match self.token { - token::LPAREN => { + token::LParen => { let mut es = self.parse_unspanned_seq( - &token::LPAREN, - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + &token::LParen, + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_expr() ); hi = self.last_span.hi; @@ -2352,12 +2351,12 @@ impl<'a> Parser<'a> { } } } - token::LIT_INTEGER(n) => { + token::LitInteger(n) => { let 
index = n.as_str(); let dot = self.last_span.hi; hi = self.span.hi; self.bump(); - let (_, tys) = if self.eat(&token::MOD_SEP) { + let (_, tys) = if self.eat(&token::ModSep) { self.expect_lt(); self.parse_generic_values_after_lt() } else { @@ -2377,7 +2376,7 @@ impl<'a> Parser<'a> { } } } - token::LIT_FLOAT(n) => { + token::LitFloat(n) => { self.bump(); let last_span = self.last_span; self.span_err(last_span, @@ -2394,11 +2393,11 @@ impl<'a> Parser<'a> { if self.expr_is_complete(&*e) { break; } match self.token { // expr(...) - token::LPAREN => { + token::LParen => { let es = self.parse_unspanned_seq( - &token::LPAREN, - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + &token::LParen, + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_expr() ); hi = self.last_span.hi; @@ -2411,7 +2410,7 @@ impl<'a> Parser<'a> { // Could be either an index expression or a slicing expression. // Any slicing non-terminal can have a mutable version with `mut` // after the opening square bracket. - token::LBRACKET => { + token::LBracket => { self.bump(); let mutbl = if self.eat_keyword(keywords::Mut) { MutMutable @@ -2420,18 +2419,18 @@ impl<'a> Parser<'a> { }; match self.token { // e[] - token::RBRACKET => { + token::RBracket => { self.bump(); hi = self.span.hi; let slice = self.mk_slice(e, None, None, mutbl); e = self.mk_expr(lo, hi, slice) } // e[..e] - token::DOTDOT => { + token::DotDot => { self.bump(); match self.token { // e[..] - token::RBRACKET => { + token::RBracket => { self.bump(); hi = self.span.hi; let slice = self.mk_slice(e, None, None, mutbl); @@ -2445,7 +2444,7 @@ impl<'a> Parser<'a> { _ => { hi = self.span.hi; let e2 = self.parse_expr(); - self.commit_expr_expecting(&*e2, token::RBRACKET); + self.commit_expr_expecting(&*e2, token::RBracket); let slice = self.mk_slice(e, None, Some(e2), mutbl); e = self.mk_expr(lo, hi, slice) } @@ -2456,18 +2455,18 @@ impl<'a> Parser<'a> { let ix = self.parse_expr(); match self.token { // e[e..] 
| e[e..e] - token::DOTDOT => { + token::DotDot => { self.bump(); let e2 = match self.token { // e[e..] - token::RBRACKET => { + token::RBracket => { self.bump(); None } // e[e..e] _ => { let e2 = self.parse_expr(); - self.commit_expr_expecting(&*e2, token::RBRACKET); + self.commit_expr_expecting(&*e2, token::RBracket); Some(e2) } }; @@ -2482,7 +2481,7 @@ impl<'a> Parser<'a> { "`mut` keyword is invalid in index expressions"); } hi = self.span.hi; - self.commit_expr_expecting(&*ix, token::RBRACKET); + self.commit_expr_expecting(&*ix, token::RBracket); let index = self.mk_index(e, ix); e = self.mk_expr(lo, hi, index) } @@ -2502,11 +2501,11 @@ impl<'a> Parser<'a> { pub fn parse_sep_and_kleene_op(&mut self) -> (Option, ast::KleeneOp) { fn parse_kleene_op(parser: &mut Parser) -> Option { match parser.token { - token::BINOP(token::STAR) => { + token::BinOp(token::Star) => { parser.bump(); Some(ast::ZeroOrMore) }, - token::BINOP(token::PLUS) => { + token::BinOp(token::Plus) => { parser.bump(); Some(ast::OneOrMore) }, @@ -2543,7 +2542,7 @@ impl<'a> Parser<'a> { fn parse_non_delim_tt_tok(p: &mut Parser) -> TokenTree { maybe_whole!(deref p, NtTT); match p.token { - token::RPAREN | token::RBRACE | token::RBRACKET => { + token::RParen | token::RBrace | token::RBracket => { // This is a conservative error: only report the last unclosed delimiter. The // previous unclosed delimiters could actually be closed! The parser just hasn't // gotten to them yet. 
@@ -2556,14 +2555,14 @@ impl<'a> Parser<'a> { token_str).as_slice()) }, /* we ought to allow different depths of unquotation */ - token::DOLLAR if p.quote_depth > 0u => { + token::Dollar if p.quote_depth > 0u => { p.bump(); let sp = p.span; - if p.token == token::LPAREN { + if p.token == token::LParen { let seq = p.parse_seq( - &token::LPAREN, - &token::RPAREN, + &token::LParen, + &token::RParen, seq_sep_none(), |p| p.parse_token_tree() ); @@ -2583,7 +2582,7 @@ impl<'a> Parser<'a> { } match (&self.token, token::close_delimiter_for(&self.token)) { - (&token::EOF, _) => { + (&token::Eof, _) => { let open_braces = self.open_braces.clone(); for sp in open_braces.iter() { self.span_note(*sp, "Did you mean to close this delimiter?"); @@ -2628,7 +2627,7 @@ impl<'a> Parser<'a> { // up to EOF. pub fn parse_all_token_trees(&mut self) -> Vec { let mut tts = Vec::new(); - while self.token != token::EOF { + while self.token != token::Eof { tts.push(self.parse_token_tree()); } tts @@ -2659,8 +2658,8 @@ impl<'a> Parser<'a> { let mut lparens = 0u; while self.token != *ket || lparens > 0u { - if self.token == token::LPAREN { lparens += 1u; } - if self.token == token::RPAREN { lparens -= 1u; } + if self.token == token::LParen { lparens += 1u; } + if self.token == token::RParen { lparens -= 1u; } ret_val.push(self.parse_matcher(name_idx)); } @@ -2672,13 +2671,13 @@ impl<'a> Parser<'a> { pub fn parse_matcher(&mut self, name_idx: &mut uint) -> Matcher { let lo = self.span.lo; - let m = if self.token == token::DOLLAR { + let m = if self.token == token::Dollar { self.bump(); - if self.token == token::LPAREN { + if self.token == token::LParen { let name_idx_lo = *name_idx; self.bump(); let ms = self.parse_matcher_subseq_upto(name_idx, - &token::RPAREN); + &token::RParen); if ms.len() == 0u { self.fatal("repetition body must be nonempty"); } @@ -2686,7 +2685,7 @@ impl<'a> Parser<'a> { MatchSeq(ms, sep, kleene_op, name_idx_lo, *name_idx) } else { let bound_to = self.parse_ident(); - 
self.expect(&token::COLON); + self.expect(&token::Colon); let nt_name = self.parse_ident(); let m = MatchNonterminal(bound_to, nt_name, *name_idx); *name_idx += 1; @@ -2706,36 +2705,36 @@ impl<'a> Parser<'a> { let ex; match self.token { - token::NOT => { + token::Not => { self.bump(); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = self.mk_unary(UnNot, e); } - token::BINOP(token::MINUS) => { + token::BinOp(token::Minus) => { self.bump(); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = self.mk_unary(UnNeg, e); } - token::BINOP(token::STAR) => { + token::BinOp(token::Star) => { self.bump(); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = self.mk_unary(UnDeref, e); } - token::BINOP(token::AND) | token::ANDAND => { + token::BinOp(token::And) | token::AndAnd => { self.expect_and(); let m = self.parse_mutability(); let e = self.parse_prefix_expr(); hi = e.span.hi; ex = ExprAddrOf(m, e); } - token::TILDE => { + token::Tilde => { self.bump(); let last_span = self.last_span; match self.token { - token::LBRACKET => self.obsolete(last_span, ObsoleteOwnedVector), + token::LBracket => self.obsolete(last_span, ObsoleteOwnedVector), _ => self.obsolete(last_span, ObsoleteOwnedExpr) } @@ -2743,7 +2742,7 @@ impl<'a> Parser<'a> { hi = e.span.hi; ex = self.mk_unary(UnUniq, e); } - token::IDENT(_, _) => { + token::Ident(_, _) => { if !self.is_keyword(keywords::Box) { return self.parse_dot_or_call_expr(); } @@ -2751,11 +2750,11 @@ impl<'a> Parser<'a> { self.bump(); // Check for a place: `box(PLACE) EXPR`. - if self.eat(&token::LPAREN) { + if self.eat(&token::LParen) { // Support `box() EXPR` as the default. 
- if !self.eat(&token::RPAREN) { + if !self.eat(&token::RParen) { let place = self.parse_expr(); - self.expect(&token::RPAREN); + self.expect(&token::RParen); let subexpression = self.parse_prefix_expr(); hi = subexpression.span.hi; ex = ExprBox(place, subexpression); @@ -2785,7 +2784,7 @@ impl<'a> Parser<'a> { // Prevent dynamic borrow errors later on by limiting the // scope of the borrows. - if self.token == token::BINOP(token::OR) && + if self.token == token::BinOp(token::Or) && self.restrictions.contains(RESTRICTION_NO_BAR_OP) { return lhs; } @@ -2829,25 +2828,25 @@ impl<'a> Parser<'a> { let lhs = self.parse_binops(); let restrictions = self.restrictions & RESTRICTION_NO_STRUCT_LITERAL; match self.token { - token::EQ => { + token::Eq => { self.bump(); let rhs = self.parse_expr_res(restrictions); self.mk_expr(lo, rhs.span.hi, ExprAssign(lhs, rhs)) } - token::BINOPEQ(op) => { + token::BinOpEq(op) => { self.bump(); let rhs = self.parse_expr_res(restrictions); let aop = match op { - token::PLUS => BiAdd, - token::MINUS => BiSub, - token::STAR => BiMul, - token::SLASH => BiDiv, - token::PERCENT => BiRem, - token::CARET => BiBitXor, - token::AND => BiBitAnd, - token::OR => BiBitOr, - token::SHL => BiShl, - token::SHR => BiShr + token::Plus => BiAdd, + token::Minus => BiSub, + token::Star => BiMul, + token::Slash => BiDiv, + token::Percent => BiRem, + token::Caret => BiBitXor, + token::And => BiBitAnd, + token::Or => BiBitOr, + token::Shl => BiShl, + token::Shr => BiShr }; let rhs_span = rhs.span; let assign_op = self.mk_assign_op(aop, lhs, rhs); @@ -2882,7 +2881,7 @@ impl<'a> Parser<'a> { let lo = self.last_span.lo; self.expect_keyword(keywords::Let); let pat = self.parse_pat(); - self.expect(&token::EQ); + self.expect(&token::Eq); let expr = self.parse_expr_res(RESTRICTION_NO_STRUCT_LITERAL); let thn = self.parse_block(); let (hi, els) = if self.eat_keyword(keywords::Else) { @@ -2967,7 +2966,7 @@ impl<'a> Parser<'a> { let lo = self.last_span.lo; 
self.expect_keyword(keywords::Let); let pat = self.parse_pat(); - self.expect(&token::EQ); + self.expect(&token::Eq); let expr = self.parse_expr_res(RESTRICTION_NO_STRUCT_LITERAL); let body = self.parse_block(); let hi = body.span.hi; @@ -2984,9 +2983,9 @@ impl<'a> Parser<'a> { fn parse_match_expr(&mut self) -> P { let lo = self.last_span.lo; let discriminant = self.parse_expr_res(RESTRICTION_NO_STRUCT_LITERAL); - self.commit_expr_expecting(&*discriminant, token::LBRACE); + self.commit_expr_expecting(&*discriminant, token::LBrace); let mut arms: Vec = Vec::new(); - while self.token != token::RBRACE { + while self.token != token::RBrace { arms.push(self.parse_arm()); } let hi = self.span.hi; @@ -3001,17 +3000,17 @@ impl<'a> Parser<'a> { if self.eat_keyword(keywords::If) { guard = Some(self.parse_expr()); } - self.expect(&token::FAT_ARROW); + self.expect(&token::FatArrow); let expr = self.parse_expr_res(RESTRICTION_STMT_EXPR); let require_comma = !classify::expr_is_simple_block(&*expr) - && self.token != token::RBRACE; + && self.token != token::RBrace; if require_comma { - self.commit_expr(&*expr, &[token::COMMA], &[token::RBRACE]); + self.commit_expr(&*expr, &[token::Comma], &[token::RBrace]); } else { - self.eat(&token::COMMA); + self.eat(&token::Comma); } ast::Arm { @@ -3038,7 +3037,7 @@ impl<'a> Parser<'a> { /// Parse the RHS of a local variable declaration (e.g. 
'= 14;') fn parse_initializer(&mut self) -> Option> { - if self.token == token::EQ { + if self.token == token::Eq { self.bump(); Some(self.parse_expr()) } else { @@ -3051,7 +3050,7 @@ impl<'a> Parser<'a> { let mut pats = Vec::new(); loop { pats.push(self.parse_pat()); - if self.token == token::BINOP(token::OR) { self.bump(); } + if self.token == token::BinOp(token::Or) { self.bump(); } else { return pats; } }; } @@ -3065,19 +3064,19 @@ impl<'a> Parser<'a> { let mut first = true; let mut before_slice = true; - while self.token != token::RBRACKET { + while self.token != token::RBracket { if first { first = false; } else { - self.expect(&token::COMMA); + self.expect(&token::Comma); } if before_slice { - if self.token == token::DOTDOT { + if self.token == token::DotDot { self.bump(); - if self.token == token::COMMA || - self.token == token::RBRACKET { + if self.token == token::Comma || + self.token == token::RBracket { slice = Some(P(ast::Pat { id: ast::DUMMY_NODE_ID, node: PatWild(PatWildMulti), @@ -3094,7 +3093,7 @@ impl<'a> Parser<'a> { } let subpat = self.parse_pat(); - if before_slice && self.token == token::DOTDOT { + if before_slice && self.token == token::DotDot { self.bump(); slice = Some(subpat); before_slice = false; @@ -3113,21 +3112,21 @@ impl<'a> Parser<'a> { let mut fields = Vec::new(); let mut etc = false; let mut first = true; - while self.token != token::RBRACE { + while self.token != token::RBrace { if first { first = false; } else { - self.expect(&token::COMMA); + self.expect(&token::Comma); // accept trailing commas - if self.token == token::RBRACE { break } + if self.token == token::RBrace { break } } let lo = self.span.lo; let hi; - if self.token == token::DOTDOT { + if self.token == token::DotDot { self.bump(); - if self.token != token::RBRACE { + if self.token != token::RBrace { let token_str = self.this_token_to_string(); self.fatal(format!("expected `{}`, found `{}`", "}", token_str).as_slice()) @@ -3146,7 +3145,7 @@ impl<'a> Parser<'a> { let 
fieldname = self.parse_ident(); - let (subpat, is_shorthand) = if self.token == token::COLON { + let (subpat, is_shorthand) = if self.token == token::Colon { match bind_type { BindByRef(..) | BindByValue(MutMutable) => { let token_str = self.this_token_to_string(); @@ -3186,7 +3185,7 @@ impl<'a> Parser<'a> { let pat; match self.token { // parse _ - token::UNDERSCORE => { + token::Underscore => { self.bump(); pat = PatWild(PatWildSingle); hi = self.last_span.hi; @@ -3196,7 +3195,7 @@ impl<'a> Parser<'a> { span: mk_sp(lo, hi) }) } - token::TILDE => { + token::Tilde => { // parse ~pat self.bump(); let sub = self.parse_pat(); @@ -3210,7 +3209,7 @@ impl<'a> Parser<'a> { span: mk_sp(lo, hi) }) } - token::BINOP(token::AND) | token::ANDAND => { + token::BinOp(token::And) | token::AndAnd => { // parse &pat let lo = self.span.lo; self.expect_and(); @@ -3223,10 +3222,10 @@ impl<'a> Parser<'a> { span: mk_sp(lo, hi) }) } - token::LPAREN => { + token::LParen => { // parse (pat,pat,pat,...) as tuple self.bump(); - if self.token == token::RPAREN { + if self.token == token::RParen { hi = self.span.hi; self.bump(); let lit = P(codemap::Spanned { @@ -3236,15 +3235,15 @@ impl<'a> Parser<'a> { pat = PatLit(expr); } else { let mut fields = vec!(self.parse_pat()); - if self.look_ahead(1, |t| *t != token::RPAREN) { - while self.token == token::COMMA { + if self.look_ahead(1, |t| *t != token::RParen) { + while self.token == token::Comma { self.bump(); - if self.token == token::RPAREN { break; } + if self.token == token::RParen { break; } fields.push(self.parse_pat()); } } - if fields.len() == 1 { self.expect(&token::COMMA); } - self.expect(&token::RPAREN); + if fields.len() == 1 { self.expect(&token::Comma); } + self.expect(&token::RParen); pat = PatTup(fields); } hi = self.last_span.hi; @@ -3254,13 +3253,13 @@ impl<'a> Parser<'a> { span: mk_sp(lo, hi) }) } - token::LBRACKET => { + token::LBracket => { // parse [pat,pat,...] 
as vector pattern self.bump(); let (before, slice, after) = self.parse_pat_vec_elements(); - self.expect(&token::RBRACKET); + self.expect(&token::RBracket); pat = ast::PatVec(before, slice, after); hi = self.last_span.hi; return P(ast::Pat { @@ -3273,7 +3272,7 @@ impl<'a> Parser<'a> { } // at this point, token != _, ~, &, &&, (, [ - if (!is_ident_or_path(&self.token) && self.token != token::MOD_SEP) + if (!is_ident_or_path(&self.token) && self.token != token::ModSep) || self.is_keyword(keywords::True) || self.is_keyword(keywords::False) { // Parse an expression pattern or exp .. exp. @@ -3281,9 +3280,9 @@ impl<'a> Parser<'a> { // These expressions are limited to literals (possibly // preceded by unary-minus) or identifiers. let val = self.parse_literal_maybe_minus(); - if (self.token == token::DOTDOTDOT) && + if (self.token == token::DotDotDot) && self.look_ahead(1, |t| { - *t != token::COMMA && *t != token::RBRACKET + *t != token::Comma && *t != token::RBracket }) { self.bump(); let end = if is_ident_or_path(&self.token) { @@ -3320,25 +3319,25 @@ impl<'a> Parser<'a> { } else { let can_be_enum_or_struct = self.look_ahead(1, |t| { match *t { - token::LPAREN | token::LBRACKET | token::LT | - token::LBRACE | token::MOD_SEP => true, + token::LParen | token::LBracket | token::Lt | + token::LBrace | token::ModSep => true, _ => false, } }); - if self.look_ahead(1, |t| *t == token::DOTDOTDOT) && + if self.look_ahead(1, |t| *t == token::DotDotDot) && self.look_ahead(2, |t| { - *t != token::COMMA && *t != token::RBRACKET + *t != token::Comma && *t != token::RBracket }) { let start = self.parse_expr_res(RESTRICTION_NO_BAR_OP); - self.eat(&token::DOTDOTDOT); + self.eat(&token::DotDotDot); let end = self.parse_expr_res(RESTRICTION_NO_BAR_OP); pat = PatRange(start, end); } else if is_plain_ident(&self.token) && !can_be_enum_or_struct { let id = self.parse_ident(); let id_span = self.last_span; let pth1 = codemap::Spanned{span:id_span, node: id}; - if self.eat(&token::NOT) { + if 
self.eat(&token::Not) { // macro invocation let ket = token::close_delimiter_for(&self.token) .unwrap_or_else(|| self.fatal("expected open delimiter")); @@ -3351,7 +3350,7 @@ impl<'a> Parser<'a> { let mac = MacInvocTT(ident_to_path(id_span,id), tts, EMPTY_CTXT); pat = ast::PatMac(codemap::Spanned {node: mac, span: self.span}); } else { - let sub = if self.eat(&token::AT) { + let sub = if self.eat(&token::At) { // parse foo @ pat Some(self.parse_pat()) } else { @@ -3365,7 +3364,7 @@ impl<'a> Parser<'a> { let enum_path = self.parse_path(LifetimeAndTypesWithColons) .path; match self.token { - token::LBRACE => { + token::LBrace => { self.bump(); let (fields, etc) = self.parse_pat_fields(); @@ -3375,10 +3374,10 @@ impl<'a> Parser<'a> { _ => { let mut args: Vec> = Vec::new(); match self.token { - token::LPAREN => { + token::LParen => { let is_dotdot = self.look_ahead(1, |t| { match *t { - token::DOTDOT => true, + token::DotDot => true, _ => false, } }); @@ -3386,13 +3385,13 @@ impl<'a> Parser<'a> { // This is a "top constructor only" pat self.bump(); self.bump(); - self.expect(&token::RPAREN); + self.expect(&token::RParen); pat = PatEnum(enum_path, None); } else { args = self.parse_enum_variant_seq( - &token::LPAREN, - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + &token::LParen, + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_pat() ); pat = PatEnum(enum_path, Some(args)); @@ -3448,7 +3447,7 @@ impl<'a> Parser<'a> { let ident = self.parse_ident(); let last_span = self.last_span; let name = codemap::Spanned{span: last_span, node: ident}; - let sub = if self.eat(&token::AT) { + let sub = if self.eat(&token::At) { Some(self.parse_pat()) } else { None @@ -3460,7 +3459,7 @@ impl<'a> Parser<'a> { // leads to a parse error. 
Note that if there is no explicit // binding mode then we do not end up here, because the lookahead // will direct us over to parse_enum_variant() - if self.token == token::LPAREN { + if self.token == token::LParen { let last_span = self.last_span; self.span_fatal( last_span, @@ -3480,7 +3479,7 @@ impl<'a> Parser<'a> { node: TyInfer, span: mk_sp(lo, lo), }); - if self.eat(&token::COLON) { + if self.eat(&token::Colon) { ty = self.parse_ty(true); } let init = self.parse_initializer(); @@ -3509,7 +3508,7 @@ impl<'a> Parser<'a> { self.fatal("expected ident"); } let name = self.parse_ident(); - self.expect(&token::COLON); + self.expect(&token::Colon); let ty = self.parse_ty(true); spanned(lo, self.last_span.hi, ast::StructField_ { kind: NamedField(name, pr), @@ -3550,7 +3549,7 @@ impl<'a> Parser<'a> { P(spanned(lo, decl.span.hi, StmtDecl(decl, ast::DUMMY_NODE_ID))) } else if is_ident(&self.token) && !token::is_any_keyword(&self.token) - && self.look_ahead(1, |t| *t == token::NOT) { + && self.look_ahead(1, |t| *t == token::Not) { // it's a macro invocation: check_expected_item(self, item_attrs.as_slice()); @@ -3649,7 +3648,7 @@ impl<'a> Parser<'a> { maybe_whole!(no_clone self, NtBlock); let lo = self.span.lo; - self.expect(&token::LBRACE); + self.expect(&token::LBrace); return self.parse_block_tail_(lo, DefaultBlock, Vec::new()); } @@ -3661,7 +3660,7 @@ impl<'a> Parser<'a> { maybe_whole!(pair_empty self, NtBlock); let lo = self.span.lo; - self.expect(&token::LBRACE); + self.expect(&token::LBrace); let (inner, next) = self.parse_inner_attrs_and_next(); (inner, self.parse_block_tail_(lo, DefaultBlock, next)) @@ -3698,12 +3697,12 @@ impl<'a> Parser<'a> { let mut attributes_box = attrs_remaining; - while self.token != token::RBRACE { + while self.token != token::RBrace { // parsing items even when they're not allowed lets us give // better error messages and recover more gracefully. 
attributes_box.push_all(self.parse_outer_attributes().as_slice()); match self.token { - token::SEMI => { + token::Semi => { if !attributes_box.is_empty() { let last_span = self.last_span; self.span_err(last_span, @@ -3712,7 +3711,7 @@ impl<'a> Parser<'a> { } self.bump(); // empty } - token::RBRACE => { + token::RBrace => { // fall through and out. } _ => { @@ -3723,11 +3722,11 @@ impl<'a> Parser<'a> { // expression without semicolon if classify::expr_requires_semi_to_be_stmt(&*e) { // Just check for errors and recover; do not eat semicolon yet. - self.commit_stmt(&[], &[token::SEMI, token::RBRACE]); + self.commit_stmt(&[], &[token::Semi, token::RBrace]); } match self.token { - token::SEMI => { + token::Semi => { self.bump(); let span_with_semi = Span { lo: span.lo, @@ -3739,7 +3738,7 @@ impl<'a> Parser<'a> { span: span_with_semi, })); } - token::RBRACE => { + token::RBrace => { expr = Some(e); } _ => { @@ -3753,14 +3752,14 @@ impl<'a> Parser<'a> { StmtMac(m, semi) => { // statement macro; might be an expr match self.token { - token::SEMI => { + token::Semi => { stmts.push(P(Spanned { node: StmtMac(m, true), span: span, })); self.bump(); } - token::RBRACE => { + token::RBrace => { // if a block ends in `m!(arg)` without // a `;`, it must be an expr expr = Some( @@ -3778,7 +3777,7 @@ impl<'a> Parser<'a> { } _ => { // all other kinds of statements: if classify::stmt_ends_with_semi(&node) { - self.commit_stmt_expecting(token::SEMI); + self.commit_stmt_expecting(token::Semi); } stmts.push(P(Spanned { @@ -3814,7 +3813,7 @@ impl<'a> Parser<'a> { fn parse_colon_then_ty_param_bounds(&mut self) -> OwnedSlice { - if !self.eat(&token::COLON) { + if !self.eat(&token::Colon) { OwnedSlice::empty() } else { self.parse_ty_param_bounds() @@ -3830,7 +3829,7 @@ impl<'a> Parser<'a> { { let mut result = vec!(); loop { - let lifetime_defs = if self.eat(&token::LT) { + let lifetime_defs = if self.eat(&token::Lt) { let lifetime_defs = self.parse_lifetime_defs(); self.expect_gt(); 
lifetime_defs @@ -3838,7 +3837,7 @@ impl<'a> Parser<'a> { Vec::new() }; match self.token { - token::LIFETIME(lifetime) => { + token::Lifetime(lifetime) => { if lifetime_defs.len() > 0 { let span = self.last_span; self.span_err(span, "lifetime declarations are not \ @@ -3852,14 +3851,14 @@ impl<'a> Parser<'a> { })); self.bump(); } - token::MOD_SEP | token::IDENT(..) => { + token::ModSep | token::Ident(..) => { let path = self.parse_path(LifetimeAndTypesWithoutColons).path; - if self.token == token::LPAREN { + if self.token == token::LParen { self.bump(); let inputs = self.parse_seq_to_end( - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_arg_general(false)); let (return_style, output) = self.parse_ret_ty(); result.push(UnboxedFnTyParamBound(P(UnboxedFnBound { @@ -3884,7 +3883,7 @@ impl<'a> Parser<'a> { _ => break, } - if !self.eat(&token::BINOP(token::PLUS)) { + if !self.eat(&token::BinOp(token::Plus)) { break; } } @@ -3920,7 +3919,7 @@ impl<'a> Parser<'a> { let mut span = self.span; let mut ident = self.parse_ident(); let mut unbound = None; - if self.eat(&token::QUESTION) { + if self.eat(&token::Question) { let tref = Parser::trait_ref_from_ident(ident, span); unbound = Some(TraitTyParamBound(tref)); span = self.span; @@ -3929,7 +3928,7 @@ impl<'a> Parser<'a> { let bounds = self.parse_colon_then_ty_param_bounds(); - let default = if self.token == token::EQ { + let default = if self.token == token::Eq { self.bump(); Some(self.parse_ty(true)) } @@ -3953,10 +3952,10 @@ impl<'a> Parser<'a> { /// | ( < lifetimes , typaramseq ( , )? 
> ) /// where typaramseq = ( typaram ) | ( typaram , typaramseq ) pub fn parse_generics(&mut self) -> ast::Generics { - if self.eat(&token::LT) { + if self.eat(&token::Lt) { let lifetime_defs = self.parse_lifetime_defs(); let mut seen_default = false; - let ty_params = self.parse_seq_to_gt(Some(token::COMMA), |p| { + let ty_params = self.parse_seq_to_gt(Some(token::Comma), |p| { p.forbid_lifetime(); let ty_param = p.parse_ty_param(); if ty_param.default.is_some() { @@ -3982,9 +3981,9 @@ impl<'a> Parser<'a> { } fn parse_generic_values_after_lt(&mut self) -> (Vec, Vec> ) { - let lifetimes = self.parse_lifetimes(token::COMMA); + let lifetimes = self.parse_lifetimes(token::Comma); let result = self.parse_seq_to_gt( - Some(token::COMMA), + Some(token::Comma), |p| { p.forbid_lifetime(); p.parse_ty(true) @@ -4011,10 +4010,10 @@ impl<'a> Parser<'a> { loop { let lo = self.span.lo; let ident = match self.token { - token::IDENT(..) => self.parse_ident(), + token::Ident(..) => self.parse_ident(), _ => break, }; - self.expect(&token::COLON); + self.expect(&token::Colon); let bounds = self.parse_ty_param_bounds(); let hi = self.span.hi; @@ -4034,7 +4033,7 @@ impl<'a> Parser<'a> { }); parsed_something = true; - if !self.eat(&token::COMMA) { + if !self.eat(&token::Comma) { break } } @@ -4052,14 +4051,14 @@ impl<'a> Parser<'a> { let sp = self.span; let mut args: Vec> = self.parse_unspanned_seq( - &token::LPAREN, - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + &token::LParen, + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| { - if p.token == token::DOTDOTDOT { + if p.token == token::DotDotDot { p.bump(); if allow_variadic { - if p.token != token::RPAREN { + if p.token != token::RParen { let span = p.span; p.span_fatal(span, "`...` must be last in argument list for variadic function"); @@ -4112,14 +4111,14 @@ impl<'a> Parser<'a> { fn is_self_ident(&mut self) -> bool { match self.token { - token::IDENT(id, false) => id.name == special_idents::self_.name, + 
token::Ident(id, false) => id.name == special_idents::self_.name, _ => false } } fn expect_self_ident(&mut self) -> ast::Ident { match self.token { - token::IDENT(id, false) if id.name == special_idents::self_.name => { + token::Ident(id, false) if id.name == special_idents::self_.name => { self.bump(); id }, @@ -4178,7 +4177,7 @@ impl<'a> Parser<'a> { } } - self.expect(&token::LPAREN); + self.expect(&token::LParen); // A bit of complexity and lookahead is needed here in order to be // backwards compatible. @@ -4188,13 +4187,13 @@ impl<'a> Parser<'a> { let mut mutbl_self = MutImmutable; let explicit_self = match self.token { - token::BINOP(token::AND) => { + token::BinOp(token::And) => { let eself = maybe_parse_borrowed_explicit_self(self); self_ident_lo = self.last_span.lo; self_ident_hi = self.last_span.hi; eself } - token::TILDE => { + token::Tilde => { // We need to make sure it isn't a type if self.look_ahead(1, |t| token::is_keyword(keywords::Self, t)) { self.bump(); @@ -4204,7 +4203,7 @@ impl<'a> Parser<'a> { } SelfStatic } - token::BINOP(token::STAR) => { + token::BinOp(token::Star) => { // Possibly "*self" or "*mut self" -- not supported. Try to avoid // emitting cryptic "unexpected token" errors. self.bump(); @@ -4221,13 +4220,13 @@ impl<'a> Parser<'a> { // error case, making bogus self ident: SelfValue(special_idents::self_) } - token::IDENT(..) => { + token::Ident(..) => { if self.is_self_ident() { let self_ident = self.expect_self_ident(); // Determine whether this is the fully explicit form, `self: // TYPE`. - if self.eat(&token::COLON) { + if self.eat(&token::Colon) { SelfExplicit(self.parse_ty(false), self_ident) } else { SelfValue(self_ident) @@ -4241,13 +4240,13 @@ impl<'a> Parser<'a> { // Determine whether this is the fully explicit form, // `self: TYPE`. 
- if self.eat(&token::COLON) { + if self.eat(&token::Colon) { SelfExplicit(self.parse_ty(false), self_ident) } else { SelfValue(self_ident) } } else if Parser::token_is_mutability(&self.token) && - self.look_ahead(1, |t| *t == token::TILDE) && + self.look_ahead(1, |t| *t == token::Tilde) && self.look_ahead(2, |t| { token::is_keyword(keywords::Self, t) }) { @@ -4273,18 +4272,18 @@ impl<'a> Parser<'a> { { // If we parsed a self type, expect a comma before the argument list. match self.token { - token::COMMA => { + token::Comma => { self.bump(); - let sep = seq_sep_trailing_allowed(token::COMMA); + let sep = seq_sep_trailing_allowed(token::Comma); let mut fn_inputs = self.parse_seq_to_before_end( - &token::RPAREN, + &token::RParen, sep, parse_arg_fn ); fn_inputs.insert(0, Arg::new_self(explicit_self_sp, mutbl_self, $self_id)); fn_inputs } - token::RPAREN => { + token::RParen => { vec!(Arg::new_self(explicit_self_sp, mutbl_self, $self_id)) } _ => { @@ -4298,8 +4297,8 @@ impl<'a> Parser<'a> { let fn_inputs = match explicit_self { SelfStatic => { - let sep = seq_sep_trailing_allowed(token::COMMA); - self.parse_seq_to_before_end(&token::RPAREN, sep, parse_arg_fn) + let sep = seq_sep_trailing_allowed(token::Comma); + self.parse_seq_to_before_end(&token::RParen, sep, parse_arg_fn) } SelfValue(id) => parse_remaining_arguments!(id), SelfRegion(_,_,id) => parse_remaining_arguments!(id), @@ -4307,7 +4306,7 @@ impl<'a> Parser<'a> { }; - self.expect(&token::RPAREN); + self.expect(&token::RParen); let hi = self.span.hi; @@ -4327,22 +4326,22 @@ impl<'a> Parser<'a> { fn parse_fn_block_decl(&mut self) -> (P, Option) { let (optional_unboxed_closure_kind, inputs_captures) = { - if self.eat(&token::OROR) { + if self.eat(&token::OrOr) { (None, Vec::new()) } else { - self.expect(&token::BINOP(token::OR)); + self.expect(&token::BinOp(token::Or)); let optional_unboxed_closure_kind = self.parse_optional_unboxed_closure_kind(); let args = self.parse_seq_to_before_end( - 
&token::BINOP(token::OR), - seq_sep_trailing_allowed(token::COMMA), + &token::BinOp(token::Or), + seq_sep_trailing_allowed(token::Comma), |p| p.parse_fn_block_arg() ); self.bump(); (optional_unboxed_closure_kind, args) } }; - let (style, output) = if self.token == token::RARROW { + let (style, output) = if self.token == token::RArrow { self.parse_ret_ty() } else { (Return, P(Ty { @@ -4363,12 +4362,12 @@ impl<'a> Parser<'a> { /// Parses the `(arg, arg) -> return_type` header on a procedure. fn parse_proc_decl(&mut self) -> P { let inputs = - self.parse_unspanned_seq(&token::LPAREN, - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + self.parse_unspanned_seq(&token::LParen, + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_fn_block_arg()); - let (style, output) = if self.token == token::RARROW { + let (style, output) = if self.token == token::RArrow { self.parse_ret_ty() } else { (Return, P(Ty { @@ -4432,12 +4431,12 @@ impl<'a> Parser<'a> { // code copied from parse_macro_use_or_failure... abstraction! let (method_, hi, new_attrs) = { if !token::is_any_keyword(&self.token) - && self.look_ahead(1, |t| *t == token::NOT) - && (self.look_ahead(2, |t| *t == token::LPAREN) - || self.look_ahead(2, |t| *t == token::LBRACE)) { + && self.look_ahead(1, |t| *t == token::Not) + && (self.look_ahead(2, |t| *t == token::LParen) + || self.look_ahead(2, |t| *t == token::LBrace)) { // method macro. 
let pth = self.parse_path(NoTypesAllowed).path; - self.expect(&token::NOT); + self.expect(&token::Not); // eat a matched-delimiter token tree: let tts = match token::close_delimiter_for(&self.token) { @@ -4512,10 +4511,10 @@ impl<'a> Parser<'a> { fn parse_impl_items(&mut self) -> (Vec, Vec) { let mut impl_items = Vec::new(); - self.expect(&token::LBRACE); + self.expect(&token::LBrace); let (inner_attrs, mut method_attrs) = self.parse_inner_attrs_and_next(); - while !self.eat(&token::RBRACE) { + while !self.eat(&token::RBrace) { method_attrs.extend(self.parse_outer_attributes().into_iter()); let vis = self.parse_visibility(); if self.eat_keyword(keywords::Type) { @@ -4541,7 +4540,7 @@ impl<'a> Parser<'a> { // Special case: if the next identifier that follows is '(', don't // allow this to be parsed as a trait. - let could_be_trait = self.token != token::LPAREN; + let could_be_trait = self.token != token::LParen; // Parse the trait. let mut ty = self.parse_ty(true); @@ -4589,7 +4588,7 @@ impl<'a> Parser<'a> { let class_name = self.parse_ident(); let mut generics = self.parse_generics(); - if self.eat(&token::COLON) { + if self.eat(&token::Colon) { let ty = self.parse_ty(true); self.span_err(ty.span, "`virtual` structs have been removed from the language"); } @@ -4599,11 +4598,11 @@ impl<'a> Parser<'a> { let mut fields: Vec; let is_tuple_like; - if self.eat(&token::LBRACE) { + if self.eat(&token::LBrace) { // It's a record-like struct. is_tuple_like = false; fields = Vec::new(); - while self.token != token::RBRACE { + while self.token != token::RBrace { fields.push(self.parse_struct_decl_field()); } if fields.len() == 0 { @@ -4612,13 +4611,13 @@ impl<'a> Parser<'a> { token::get_ident(class_name)).as_slice()); } self.bump(); - } else if self.token == token::LPAREN { + } else if self.token == token::LParen { // It's a tuple-like struct. 
is_tuple_like = true; fields = self.parse_unspanned_seq( - &token::LPAREN, - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + &token::LParen, + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| { let attrs = p.parse_outer_attributes(); let lo = p.span.lo; @@ -4635,8 +4634,8 @@ impl<'a> Parser<'a> { written as `struct {};`", token::get_ident(class_name)).as_slice()); } - self.expect(&token::SEMI); - } else if self.eat(&token::SEMI) { + self.expect(&token::Semi); + } else if self.eat(&token::Semi) { // It's a unit-like struct. is_tuple_like = true; fields = Vec::new(); @@ -4664,10 +4663,10 @@ impl<'a> Parser<'a> { -> StructField { let a_var = self.parse_name_and_ty(vis, attrs); match self.token { - token::COMMA => { + token::Comma => { self.bump(); } - token::RBRACE => {} + token::RBrace => {} _ => { let span = self.span; let token_str = self.this_token_to_string(); @@ -4701,7 +4700,7 @@ impl<'a> Parser<'a> { if self.eat_keyword(keywords::For) { let span = self.span; let ident = self.parse_ident(); - if !self.eat(&token::QUESTION) { + if !self.eat(&token::Question) { self.span_err(span, "expected 'Sized?' after `for` in trait item"); return None; @@ -4776,11 +4775,11 @@ impl<'a> Parser<'a> { fn parse_item_const(&mut self, m: Option) -> ItemInfo { let id = self.parse_ident(); - self.expect(&token::COLON); + self.expect(&token::Colon); let ty = self.parse_ty(true); - self.expect(&token::EQ); + self.expect(&token::Eq); let e = self.parse_expr(); - self.commit_expr_expecting(&*e, token::SEMI); + self.commit_expr_expecting(&*e, token::Semi); let item = match m { Some(m) => ItemStatic(ty, m, e), None => ItemConst(ty, e), @@ -4792,20 +4791,20 @@ impl<'a> Parser<'a> { fn parse_item_mod(&mut self, outer_attrs: &[Attribute]) -> ItemInfo { let id_span = self.span; let id = self.parse_ident(); - if self.token == token::SEMI { + if self.token == token::Semi { self.bump(); // This mod is in an external file. Let's go get it! 
let (m, attrs) = self.eval_src_mod(id, outer_attrs, id_span); (id, m, Some(attrs)) } else { self.push_mod_path(id, outer_attrs); - self.expect(&token::LBRACE); + self.expect(&token::LBrace); let mod_inner_lo = self.span.lo; let old_owns_directory = self.owns_directory; self.owns_directory = true; let (inner, next) = self.parse_inner_attrs_and_next(); - let m = self.parse_mod_items(token::RBRACE, next, mod_inner_lo); - self.expect(&token::RBRACE); + let m = self.parse_mod_items(token::RBrace, next, mod_inner_lo); + self.expect(&token::RBrace); self.owns_directory = old_owns_directory; self.pop_mod_path(); (id, ItemMod(m), Some(inner)) @@ -4929,7 +4928,7 @@ impl<'a> Parser<'a> { let mod_inner_lo = p0.span.lo; let (mod_attrs, next) = p0.parse_inner_attrs_and_next(); let first_item_outer_attrs = next; - let m0 = p0.parse_mod_items(token::EOF, first_item_outer_attrs, mod_inner_lo); + let m0 = p0.parse_mod_items(token::Eof, first_item_outer_attrs, mod_inner_lo); self.sess.included_mod_stack.borrow_mut().pop(); return (ast::ItemMod(m0), mod_attrs); } @@ -4944,7 +4943,7 @@ impl<'a> Parser<'a> { let decl = self.parse_fn_decl(true); self.parse_where_clause(&mut generics); let hi = self.span.hi; - self.expect(&token::SEMI); + self.expect(&token::Semi); P(ast::ForeignItem { ident: ident, attrs: attrs, @@ -4964,10 +4963,10 @@ impl<'a> Parser<'a> { let mutbl = self.eat_keyword(keywords::Mut); let ident = self.parse_ident(); - self.expect(&token::COLON); + self.expect(&token::Colon); let ty = self.parse_ty(true); let hi = self.span.hi; - self.expect(&token::SEMI); + self.expect(&token::Semi); P(ForeignItem { ident: ident, attrs: attrs, @@ -5006,7 +5005,7 @@ impl<'a> Parser<'a> { self.span_err(last_span, Parser::expected_item_err(attrs_remaining.as_slice())); } - assert!(self.token == token::RBRACE); + assert!(self.token == token::RBrace); ast::ForeignMod { abi: abi, view_items: view_items, @@ -5029,9 +5028,9 @@ impl<'a> Parser<'a> { let span = self.span; let (maybe_path, ident) = 
match self.token { - token::IDENT(..) => { + token::Ident(..) => { let the_ident = self.parse_ident(); - let path = if self.eat(&token::EQ) { + let path = if self.eat(&token::Eq) { let path = self.parse_str(); let span = self.span; self.obsolete(span, ObsoleteExternCrateRenaming); @@ -5048,14 +5047,14 @@ impl<'a> Parser<'a> { } else { None }; - self.expect(&token::SEMI); + self.expect(&token::Semi); (path, the_ident) }, - token::LIT_STR(..) | token::LIT_STR_RAW(..) => { + token::LitStr(..) | token::LitStrRaw(..) => { let path = self.parse_str(); self.expect_keyword(keywords::As); let the_ident = self.parse_ident(); - self.expect(&token::SEMI); + self.expect(&token::Semi); (Some(path), the_ident) }, _ => { @@ -5093,13 +5092,13 @@ impl<'a> Parser<'a> { attrs: Vec ) -> ItemOrViewItem { - self.expect(&token::LBRACE); + self.expect(&token::LBrace); let abi = opt_abi.unwrap_or(abi::C); let (inner, next) = self.parse_inner_attrs_and_next(); let m = self.parse_foreign_mod_items(abi, next); - self.expect(&token::RBRACE); + self.expect(&token::RBrace); let last_span = self.last_span; let item = self.mk_item(lo, @@ -5116,9 +5115,9 @@ impl<'a> Parser<'a> { let ident = self.parse_ident(); let mut tps = self.parse_generics(); self.parse_where_clause(&mut tps); - self.expect(&token::EQ); + self.expect(&token::Eq); let ty = self.parse_ty(true); - self.expect(&token::SEMI); + self.expect(&token::Semi); (ident, ItemTy(ty, tps), None) } @@ -5126,7 +5125,7 @@ impl<'a> Parser<'a> { /// this should probably be renamed or refactored... 
fn parse_struct_def(&mut self) -> P { let mut fields: Vec = Vec::new(); - while self.token != token::RBRACE { + while self.token != token::RBrace { fields.push(self.parse_struct_decl_field()); } self.bump(); @@ -5142,7 +5141,7 @@ impl<'a> Parser<'a> { let mut variants = Vec::new(); let mut all_nullary = true; let mut any_disr = None; - while self.token != token::RBRACE { + while self.token != token::RBrace { let variant_attrs = self.parse_outer_attributes(); let vlo = self.span.lo; @@ -5153,16 +5152,16 @@ impl<'a> Parser<'a> { let mut args = Vec::new(); let mut disr_expr = None; ident = self.parse_ident(); - if self.eat(&token::LBRACE) { + if self.eat(&token::LBrace) { // Parse a struct variant. all_nullary = false; kind = StructVariantKind(self.parse_struct_def()); - } else if self.token == token::LPAREN { + } else if self.token == token::LParen { all_nullary = false; let arg_tys = self.parse_enum_variant_seq( - &token::LPAREN, - &token::RPAREN, - seq_sep_trailing_allowed(token::COMMA), + &token::LParen, + &token::RParen, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_ty(true) ); for ty in arg_tys.into_iter() { @@ -5172,7 +5171,7 @@ impl<'a> Parser<'a> { }); } kind = TupleVariantKind(args); - } else if self.eat(&token::EQ) { + } else if self.eat(&token::Eq) { disr_expr = Some(self.parse_expr()); any_disr = disr_expr.as_ref().map(|expr| expr.span); kind = TupleVariantKind(args); @@ -5190,9 +5189,9 @@ impl<'a> Parser<'a> { }; variants.push(P(spanned(vlo, self.last_span.hi, vr))); - if !self.eat(&token::COMMA) { break; } + if !self.eat(&token::Comma) { break; } } - self.expect(&token::RBRACE); + self.expect(&token::RBrace); match any_disr { Some(disr_span) if !all_nullary => self.span_err(disr_span, @@ -5208,7 +5207,7 @@ impl<'a> Parser<'a> { let id = self.parse_ident(); let mut generics = self.parse_generics(); self.parse_where_clause(&mut generics); - self.expect(&token::LBRACE); + self.expect(&token::LBrace); let enum_definition = 
self.parse_enum_def(&generics); (id, ItemEnum(enum_definition, generics), None) @@ -5216,7 +5215,7 @@ impl<'a> Parser<'a> { fn fn_expr_lookahead(tok: &token::Token) -> bool { match *tok { - token::LPAREN | token::AT | token::TILDE | token::BINOP(_) => true, + token::LParen | token::At | token::Tilde | token::BinOp(_) => true, _ => false } } @@ -5225,7 +5224,7 @@ impl<'a> Parser<'a> { /// the `extern` keyword, if one is found. fn parse_opt_abi(&mut self) -> Option { match self.token { - token::LIT_STR(s) | token::LIT_STR_RAW(s, _) => { + token::LitStr(s) | token::LitStrRaw(s, _) => { self.bump(); let the_string = s.as_str(); match abi::lookup(the_string) { @@ -5256,7 +5255,7 @@ impl<'a> Parser<'a> { macros_allowed: bool) -> ItemOrViewItem { let nt_item = match self.token { - INTERPOLATED(token::NtItem(ref item)) => { + token::Interpolated(token::NtItem(ref item)) => { Some((**item).clone()) } _ => None @@ -5280,7 +5279,7 @@ impl<'a> Parser<'a> { if self.eat_keyword(keywords::Use) { // USE ITEM (IoviViewItem) let view_item = self.parse_use(); - self.expect(&token::SEMI); + self.expect(&token::Semi); return IoviViewItem(ast::ViewItem { node: view_item, attrs: attrs, @@ -5319,7 +5318,7 @@ impl<'a> Parser<'a> { visibility, maybe_append(attrs, extra_attrs)); return IoviItem(item); - } else if self.token == token::LBRACE { + } else if self.token == token::LBrace { return self.parse_item_foreign_mod(lo, opt_abi, visibility, attrs); } @@ -5384,7 +5383,7 @@ impl<'a> Parser<'a> { return IoviItem(item); } if self.is_keyword(keywords::Unsafe) - && self.look_ahead(1u, |t| *t != token::LBRACE) { + && self.look_ahead(1u, |t| *t != token::LBrace) { // UNSAFE FUNCTION ITEM self.bump(); let abi = if self.eat_keyword(keywords::Extern) { @@ -5512,15 +5511,15 @@ impl<'a> Parser<'a> { visibility: Visibility ) -> ItemOrViewItem { if macros_allowed && !token::is_any_keyword(&self.token) - && self.look_ahead(1, |t| *t == token::NOT) + && self.look_ahead(1, |t| *t == token::Not) && 
(self.look_ahead(2, |t| is_plain_ident(t)) - || self.look_ahead(2, |t| *t == token::LPAREN) - || self.look_ahead(2, |t| *t == token::LBRACE)) { + || self.look_ahead(2, |t| *t == token::LParen) + || self.look_ahead(2, |t| *t == token::LBrace)) { // MACRO INVOCATION ITEM // item macro. let pth = self.parse_path(NoTypesAllowed).path; - self.expect(&token::NOT); + self.expect(&token::Not); // a 'special' identifier (like what `macro_rules!` uses) // is optional. We should eventually unify invoc syntax @@ -5601,11 +5600,11 @@ impl<'a> Parser<'a> { fn parse_view_path(&mut self) -> P { let lo = self.span.lo; - if self.token == token::LBRACE { + if self.token == token::LBrace { // use {foo,bar} let idents = self.parse_unspanned_seq( - &token::LBRACE, &token::RBRACE, - seq_sep_trailing_allowed(token::COMMA), + &token::LBrace, &token::RBrace, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_path_list_item()); let path = ast::Path { span: mk_sp(lo, self.span.hi), @@ -5619,12 +5618,12 @@ impl<'a> Parser<'a> { let first_ident = self.parse_ident(); let mut path = vec!(first_ident); match self.token { - token::EQ => { + token::Eq => { // x = foo::bar self.bump(); let path_lo = self.span.lo; path = vec!(self.parse_ident()); - while self.token == token::MOD_SEP { + while self.token == token::ModSep { self.bump(); let id = self.parse_ident(); path.push(id); @@ -5647,23 +5646,23 @@ impl<'a> Parser<'a> { ast::DUMMY_NODE_ID))); } - token::MOD_SEP => { + token::ModSep => { // foo::bar or foo::{a,b,c} or foo::* - while self.token == token::MOD_SEP { + while self.token == token::ModSep { self.bump(); match self.token { - token::IDENT(i, _) => { + token::Ident(i, _) => { self.bump(); path.push(i); } // foo::bar::{a,b,c} - token::LBRACE => { + token::LBrace => { let idents = self.parse_unspanned_seq( - &token::LBRACE, - &token::RBRACE, - seq_sep_trailing_allowed(token::COMMA), + &token::LBrace, + &token::RBrace, + seq_sep_trailing_allowed(token::Comma), |p| p.parse_path_list_item() ); 
let path = ast::Path { @@ -5682,7 +5681,7 @@ impl<'a> Parser<'a> { } // foo::bar::* - token::BINOP(token::STAR) => { + token::BinOp(token::Star) => { self.bump(); let path = ast::Path { span: mk_sp(lo, self.span.hi), @@ -5821,7 +5820,7 @@ impl<'a> Parser<'a> { loop { match self.parse_foreign_item(attrs, macros_allowed) { IoviNone(returned_attrs) => { - if self.token == token::RBRACE { + if self.token == token::RBrace { attrs = returned_attrs; break } @@ -5860,7 +5859,7 @@ impl<'a> Parser<'a> { let (inner, next) = self.parse_inner_attrs_and_next(); let first_item_outer_attrs = next; // parse the items inside the crate: - let m = self.parse_mod_items(token::EOF, first_item_outer_attrs, lo); + let m = self.parse_mod_items(token::Eof, first_item_outer_attrs, lo); ast::Crate { module: m, @@ -5874,8 +5873,8 @@ impl<'a> Parser<'a> { pub fn parse_optional_str(&mut self) -> Option<(InternedString, ast::StrStyle)> { let (s, style) = match self.token { - token::LIT_STR(s) => (self.id_to_interned_str(s.ident()), ast::CookedStr), - token::LIT_STR_RAW(s, n) => { + token::LitStr(s) => (self.id_to_interned_str(s.ident()), ast::CookedStr), + token::LitStrRaw(s, n) => { (self.id_to_interned_str(s.ident()), ast::RawStr(n)) } _ => return None diff --git a/src/libsyntax/parse/token.rs b/src/libsyntax/parse/token.rs index fa6b0c5ad4ae7..0cc56b2ab2b32 100644 --- a/src/libsyntax/parse/token.rs +++ b/src/libsyntax/parse/token.rs @@ -9,9 +9,7 @@ // except according to those terms. 
use ast; -use ast::{Ident, Name, Mrk}; use ext::mtwt; -use parse::token; use ptr::P; use util::interner::{RcStr, StrInterner}; use util::interner; @@ -22,94 +20,157 @@ use std::mem; use std::path::BytesContainer; use std::rc::Rc; +// NOTE(stage0): remove these re-exports after the next snapshot +// (needed to allow quotations to pass stage0) +#[cfg(stage0)] pub use self::Plus as PLUS; +#[cfg(stage0)] pub use self::Minus as MINUS; +#[cfg(stage0)] pub use self::Star as STAR; +#[cfg(stage0)] pub use self::Slash as SLASH; +#[cfg(stage0)] pub use self::Percent as PERCENT; +#[cfg(stage0)] pub use self::Caret as CARET; +#[cfg(stage0)] pub use self::And as AND; +#[cfg(stage0)] pub use self::Or as OR; +#[cfg(stage0)] pub use self::Shl as SHL; +#[cfg(stage0)] pub use self::Shr as SHR; +#[cfg(stage0)] pub use self::Eq as EQ; +#[cfg(stage0)] pub use self::Lt as LT; +#[cfg(stage0)] pub use self::Le as LE; +#[cfg(stage0)] pub use self::EqEq as EQEQ; +#[cfg(stage0)] pub use self::Ne as NE; +#[cfg(stage0)] pub use self::Ge as GE; +#[cfg(stage0)] pub use self::Gt as GT; +#[cfg(stage0)] pub use self::AndAnd as ANDAND; +#[cfg(stage0)] pub use self::OrOr as OROR; +#[cfg(stage0)] pub use self::Not as NOT; +#[cfg(stage0)] pub use self::Tilde as TILDE; +#[cfg(stage0)] pub use self::BinOp as BINOP; +#[cfg(stage0)] pub use self::BinOpEq as BINOPEQ; +#[cfg(stage0)] pub use self::At as AT; +#[cfg(stage0)] pub use self::Dot as DOT; +#[cfg(stage0)] pub use self::DotDot as DOTDOT; +#[cfg(stage0)] pub use self::DotDotDot as DOTDOTDOT; +#[cfg(stage0)] pub use self::Comma as COMMA; +#[cfg(stage0)] pub use self::Semi as SEMI; +#[cfg(stage0)] pub use self::Colon as COLON; +#[cfg(stage0)] pub use self::ModSep as MOD_SEP; +#[cfg(stage0)] pub use self::RArrow as RARROW; +#[cfg(stage0)] pub use self::LArrow as LARROW; +#[cfg(stage0)] pub use self::FatArrow as FAT_ARROW; +#[cfg(stage0)] pub use self::LParen as LPAREN; +#[cfg(stage0)] pub use self::RParen as RPAREN; +#[cfg(stage0)] pub use self::LBracket 
as LBRACKET; +#[cfg(stage0)] pub use self::RBracket as RBRACKET; +#[cfg(stage0)] pub use self::LBrace as LBRACE; +#[cfg(stage0)] pub use self::RBrace as RBRACE; +#[cfg(stage0)] pub use self::Pound as POUND; +#[cfg(stage0)] pub use self::Dollar as DOLLAR; +#[cfg(stage0)] pub use self::Question as QUESTION; +#[cfg(stage0)] pub use self::LitByte as LIT_BYTE; +#[cfg(stage0)] pub use self::LitChar as LIT_CHAR; +#[cfg(stage0)] pub use self::LitInteger as LIT_INTEGER; +#[cfg(stage0)] pub use self::LitFloat as LIT_FLOAT; +#[cfg(stage0)] pub use self::LitStr as LIT_STR; +#[cfg(stage0)] pub use self::LitStrRaw as LIT_STR_RAW; +#[cfg(stage0)] pub use self::LitBinary as LIT_BINARY; +#[cfg(stage0)] pub use self::LitBinaryRaw as LIT_BINARY_RAW; +#[cfg(stage0)] pub use self::Ident as IDENT; +#[cfg(stage0)] pub use self::Underscore as UNDERSCORE; +#[cfg(stage0)] pub use self::Lifetime as LIFETIME; +#[cfg(stage0)] pub use self::Interpolated as INTERPOLATED; +#[cfg(stage0)] pub use self::DocComment as DOC_COMMENT; +#[cfg(stage0)] pub use self::Whitespace as WS; +#[cfg(stage0)] pub use self::Comment as COMMENT; +#[cfg(stage0)] pub use self::Shebang as SHEBANG; +#[cfg(stage0)] pub use self::Eof as EOF; + #[allow(non_camel_case_types)] #[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)] -pub enum BinOp { - PLUS, - MINUS, - STAR, - SLASH, - PERCENT, - CARET, - AND, - OR, - SHL, - SHR, +pub enum BinOpToken { + Plus, + Minus, + Star, + Slash, + Percent, + Caret, + And, + Or, + Shl, + Shr, } #[allow(non_camel_case_types)] #[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash, Show)] pub enum Token { /* Expression-operator symbols. 
*/ - EQ, - LT, - LE, - EQEQ, - NE, - GE, - GT, - ANDAND, - OROR, - NOT, - TILDE, - BINOP(BinOp), - BINOPEQ(BinOp), + Eq, + Lt, + Le, + EqEq, + Ne, + Ge, + Gt, + AndAnd, + OrOr, + Not, + Tilde, + BinOp(BinOpToken), + BinOpEq(BinOpToken), /* Structural symbols */ - AT, - DOT, - DOTDOT, - DOTDOTDOT, - COMMA, - SEMI, - COLON, - MOD_SEP, - RARROW, - LARROW, - FAT_ARROW, - LPAREN, - RPAREN, - LBRACKET, - RBRACKET, - LBRACE, - RBRACE, - POUND, - DOLLAR, - QUESTION, + At, + Dot, + DotDot, + DotDotDot, + Comma, + Semi, + Colon, + ModSep, + RArrow, + LArrow, + FatArrow, + LParen, + RParen, + LBracket, + RBracket, + LBrace, + RBrace, + Pound, + Dollar, + Question, /* Literals */ - LIT_BYTE(Name), - LIT_CHAR(Name), - LIT_INTEGER(Name), - LIT_FLOAT(Name), - LIT_STR(Name), - LIT_STR_RAW(Name, uint), /* raw str delimited by n hash symbols */ - LIT_BINARY(Name), - LIT_BINARY_RAW(Name, uint), /* raw binary str delimited by n hash symbols */ + LitByte(ast::Name), + LitChar(ast::Name), + LitInteger(ast::Name), + LitFloat(ast::Name), + LitStr(ast::Name), + LitStrRaw(ast::Name, uint), /* raw str delimited by n hash symbols */ + LitBinary(ast::Name), + LitBinaryRaw(ast::Name, uint), /* raw binary str delimited by n hash symbols */ /* Name components */ /// An identifier contains an "is_mod_name" boolean, /// indicating whether :: follows this token with no /// whitespace in between. - IDENT(Ident, bool), - UNDERSCORE, - LIFETIME(Ident), + Ident(ast::Ident, bool), + Underscore, + Lifetime(ast::Ident), /* For interpolation */ - INTERPOLATED(Nonterminal), - DOC_COMMENT(Name), + Interpolated(Nonterminal), + DocComment(ast::Name), // Junk. These carry no data because we don't really care about the data // they *would* carry, and don't really want to allocate a new ident for // them. Instead, users could extract that from the associated span. 
/// Whitespace - WS, + Whitespace, /// Comment - COMMENT, - SHEBANG(Name), + Comment, + Shebang(ast::Name), - EOF, + Eof, } #[deriving(Clone, Encodable, Decodable, PartialEq, Eq, Hash)] @@ -122,7 +183,7 @@ pub enum Nonterminal { NtExpr( P), NtTy( P), /// See IDENT, above, for meaning of bool in NtIdent: - NtIdent(Box, bool), + NtIdent(Box, bool), /// Stuff inside brackets for attributes NtMeta( P), NtPath(Box), @@ -148,161 +209,131 @@ impl fmt::Show for Nonterminal { } } -pub fn binop_to_string(o: BinOp) -> &'static str { +pub fn binop_to_string(o: BinOpToken) -> &'static str { match o { - PLUS => "+", - MINUS => "-", - STAR => "*", - SLASH => "/", - PERCENT => "%", - CARET => "^", - AND => "&", - OR => "|", - SHL => "<<", - SHR => ">>" + Plus => "+", + Minus => "-", + Star => "*", + Slash => "/", + Percent => "%", + Caret => "^", + And => "&", + Or => "|", + Shl => "<<", + Shr => ">>", } } pub fn to_string(t: &Token) -> String { match *t { - EQ => "=".into_string(), - LT => "<".into_string(), - LE => "<=".into_string(), - EQEQ => "==".into_string(), - NE => "!=".into_string(), - GE => ">=".into_string(), - GT => ">".into_string(), - NOT => "!".into_string(), - TILDE => "~".into_string(), - OROR => "||".into_string(), - ANDAND => "&&".into_string(), - BINOP(op) => binop_to_string(op).into_string(), - BINOPEQ(op) => { - let mut s = binop_to_string(op).into_string(); - s.push_str("="); - s - } - - /* Structural symbols */ - AT => "@".into_string(), - DOT => ".".into_string(), - DOTDOT => "..".into_string(), - DOTDOTDOT => "...".into_string(), - COMMA => ",".into_string(), - SEMI => ";".into_string(), - COLON => ":".into_string(), - MOD_SEP => "::".into_string(), - RARROW => "->".into_string(), - LARROW => "<-".into_string(), - FAT_ARROW => "=>".into_string(), - LPAREN => "(".into_string(), - RPAREN => ")".into_string(), - LBRACKET => "[".into_string(), - RBRACKET => "]".into_string(), - LBRACE => "{".into_string(), - RBRACE => "}".into_string(), - POUND => 
"#".into_string(), - DOLLAR => "$".into_string(), - QUESTION => "?".into_string(), - - /* Literals */ - LIT_BYTE(b) => { - format!("b'{}'", b.as_str()) - } - LIT_CHAR(c) => { - format!("'{}'", c.as_str()) - } - LIT_INTEGER(c) | LIT_FLOAT(c) => { - c.as_str().into_string() - } - - LIT_STR(s) => { - format!("\"{}\"", s.as_str()) - } - LIT_STR_RAW(s, n) => { - format!("r{delim}\"{string}\"{delim}", - delim="#".repeat(n), string=s.as_str()) - } - LIT_BINARY(v) => { - format!("b\"{}\"", v.as_str()) - } - LIT_BINARY_RAW(s, n) => { - format!("br{delim}\"{string}\"{delim}", - delim="#".repeat(n), string=s.as_str()) - } - - /* Name components */ - IDENT(s, _) => get_ident(s).get().into_string(), - LIFETIME(s) => { - format!("{}", get_ident(s)) - } - UNDERSCORE => "_".into_string(), - - /* Other */ - DOC_COMMENT(s) => s.as_str().into_string(), - EOF => "".into_string(), - WS => " ".into_string(), - COMMENT => "/* */".into_string(), - SHEBANG(s) => format!("/* shebang: {}*/", s.as_str()), - - INTERPOLATED(ref nt) => { - match nt { - &NtExpr(ref e) => ::print::pprust::expr_to_string(&**e), - &NtMeta(ref e) => ::print::pprust::meta_item_to_string(&**e), - &NtTy(ref e) => ::print::pprust::ty_to_string(&**e), - &NtPath(ref e) => ::print::pprust::path_to_string(&**e), - _ => { - let mut s = "an interpolated ".into_string(); - match *nt { - NtItem(..) => s.push_str("item"), - NtBlock(..) => s.push_str("block"), - NtStmt(..) => s.push_str("statement"), - NtPat(..) => s.push_str("pattern"), - NtMeta(..) => fail!("should have been handled"), - NtExpr(..) => fail!("should have been handled"), - NtTy(..) => fail!("should have been handled"), - NtIdent(..) => s.push_str("identifier"), - NtPath(..) => fail!("should have been handled"), - NtTT(..) => s.push_str("tt"), - NtMatchers(..) 
=> s.push_str("matcher sequence") - }; - s - } + Eq => "=".into_string(), + Lt => "<".into_string(), + Le => "<=".into_string(), + EqEq => "==".into_string(), + Ne => "!=".into_string(), + Ge => ">=".into_string(), + Gt => ">".into_string(), + Not => "!".into_string(), + Tilde => "~".into_string(), + OrOr => "||".into_string(), + AndAnd => "&&".into_string(), + BinOp(op) => binop_to_string(op).into_string(), + BinOpEq(op) => format!("{}=", binop_to_string(op)), + + /* Structural symbols */ + At => "@".into_string(), + Dot => ".".into_string(), + DotDot => "..".into_string(), + DotDotDot => "...".into_string(), + Comma => ",".into_string(), + Semi => ";".into_string(), + Colon => ":".into_string(), + ModSep => "::".into_string(), + RArrow => "->".into_string(), + LArrow => "<-".into_string(), + FatArrow => "=>".into_string(), + LParen => "(".into_string(), + RParen => ")".into_string(), + LBracket => "[".into_string(), + RBracket => "]".into_string(), + LBrace => "{".into_string(), + RBrace => "}".into_string(), + Pound => "#".into_string(), + Dollar => "$".into_string(), + Question => "?".into_string(), + + /* Literals */ + LitByte(b) => format!("b'{}'", b.as_str()), + LitChar(c) => format!("'{}'", c.as_str()), + LitFloat(c) => c.as_str().into_string(), + LitInteger(c) => c.as_str().into_string(), + LitStr(s) => format!("\"{}\"", s.as_str()), + LitStrRaw(s, n) => format!("r{delim}\"{string}\"{delim}", + delim="#".repeat(n), + string=s.as_str()), + LitBinary(v) => format!("b\"{}\"", v.as_str()), + LitBinaryRaw(s, n) => format!("br{delim}\"{string}\"{delim}", + delim="#".repeat(n), + string=s.as_str()), + + /* Name components */ + Ident(s, _) => get_ident(s).get().into_string(), + Lifetime(s) => format!("{}", get_ident(s)), + Underscore => "_".into_string(), + + /* Other */ + DocComment(s) => s.as_str().into_string(), + Eof => "".into_string(), + Whitespace => " ".into_string(), + Comment => "/* */".into_string(), + Shebang(s) => format!("/* shebang: {}*/", 
s.as_str()), + + Interpolated(ref nt) => match *nt { + NtExpr(ref e) => ::print::pprust::expr_to_string(&**e), + NtMeta(ref e) => ::print::pprust::meta_item_to_string(&**e), + NtTy(ref e) => ::print::pprust::ty_to_string(&**e), + NtPath(ref e) => ::print::pprust::path_to_string(&**e), + NtItem(..) => "an interpolated item".into_string(), + NtBlock(..) => "an interpolated block".into_string(), + NtStmt(..) => "an interpolated statement".into_string(), + NtPat(..) => "an interpolated pattern".into_string(), + NtIdent(..) => "an interpolated identifier".into_string(), + NtTT(..) => "an interpolated tt".into_string(), + NtMatchers(..) => "an interpolated matcher sequence".into_string(), } - } } } pub fn can_begin_expr(t: &Token) -> bool { match *t { - LPAREN => true, - LBRACE => true, - LBRACKET => true, - IDENT(_, _) => true, - UNDERSCORE => true, - TILDE => true, - LIT_BYTE(_) => true, - LIT_CHAR(_) => true, - LIT_INTEGER(_) => true, - LIT_FLOAT(_) => true, - LIT_STR(_) => true, - LIT_STR_RAW(_, _) => true, - LIT_BINARY(_) => true, - LIT_BINARY_RAW(_, _) => true, - POUND => true, - AT => true, - NOT => true, - BINOP(MINUS) => true, - BINOP(STAR) => true, - BINOP(AND) => true, - BINOP(OR) => true, // in lambda syntax - OROR => true, // in lambda syntax - MOD_SEP => true, - INTERPOLATED(NtExpr(..)) - | INTERPOLATED(NtIdent(..)) - | INTERPOLATED(NtBlock(..)) - | INTERPOLATED(NtPath(..)) => true, - _ => false + LParen => true, + LBrace => true, + LBracket => true, + Ident(_, _) => true, + Underscore => true, + Tilde => true, + LitByte(_) => true, + LitChar(_) => true, + LitInteger(_) => true, + LitFloat(_) => true, + LitStr(_) => true, + LitStrRaw(_, _) => true, + LitBinary(_) => true, + LitBinaryRaw(_, _) => true, + Pound => true, + At => true, + Not => true, + BinOp(Minus) => true, + BinOp(Star) => true, + BinOp(And) => true, + BinOp(Or) => true, // in lambda syntax + OrOr => true, // in lambda syntax + ModSep => true, + Interpolated(NtExpr(..)) => true, + 
Interpolated(NtIdent(..)) => true, + Interpolated(NtBlock(..)) => true, + Interpolated(NtPath(..)) => true, + _ => false, } } @@ -310,40 +341,47 @@ pub fn can_begin_expr(t: &Token) -> bool { /// otherwise `None`. pub fn close_delimiter_for(t: &Token) -> Option { match *t { - LPAREN => Some(RPAREN), - LBRACE => Some(RBRACE), - LBRACKET => Some(RBRACKET), - _ => None + LParen => Some(RParen), + LBrace => Some(RBrace), + LBracket => Some(RBracket), + _ => None, } } pub fn is_lit(t: &Token) -> bool { match *t { - LIT_BYTE(_) => true, - LIT_CHAR(_) => true, - LIT_INTEGER(_) => true, - LIT_FLOAT(_) => true, - LIT_STR(_) => true, - LIT_STR_RAW(_, _) => true, - LIT_BINARY(_) => true, - LIT_BINARY_RAW(_, _) => true, - _ => false + LitByte(_) => true, + LitChar(_) => true, + LitInteger(_) => true, + LitFloat(_) => true, + LitStr(_) => true, + LitStrRaw(_, _) => true, + LitBinary(_) => true, + LitBinaryRaw(_, _) => true, + _ => false, } } pub fn is_ident(t: &Token) -> bool { - match *t { IDENT(_, _) => true, _ => false } + match *t { + Ident(_, _) => true, + _ => false, + } } pub fn is_ident_or_path(t: &Token) -> bool { match *t { - IDENT(_, _) | INTERPOLATED(NtPath(..)) => true, - _ => false + Ident(_, _) => true, + Interpolated(NtPath(..)) => true, + _ => false, } } pub fn is_plain_ident(t: &Token) -> bool { - match *t { IDENT(_, false) => true, _ => false } + match *t { + Ident(_, false) => true, + _ => false, + } } // Get the first "argument" @@ -376,22 +414,28 @@ macro_rules! 
declare_special_idents_and_keywords {( $( ($rk_name:expr, $rk_variant:ident, $rk_str:expr); )* } ) => { - static STRICT_KEYWORD_START: Name = first!($( Name($sk_name), )*); - static STRICT_KEYWORD_FINAL: Name = last!($( Name($sk_name), )*); - static RESERVED_KEYWORD_START: Name = first!($( Name($rk_name), )*); - static RESERVED_KEYWORD_FINAL: Name = last!($( Name($rk_name), )*); + static STRICT_KEYWORD_START: ast::Name = first!($( ast::Name($sk_name), )*); + static STRICT_KEYWORD_FINAL: ast::Name = last!($( ast::Name($sk_name), )*); + static RESERVED_KEYWORD_START: ast::Name = first!($( ast::Name($rk_name), )*); + static RESERVED_KEYWORD_FINAL: ast::Name = last!($( ast::Name($rk_name), )*); pub mod special_idents { - use ast::{Ident, Name}; + use ast; $( #[allow(non_uppercase_statics)] - pub const $si_static: Ident = Ident { name: Name($si_name), ctxt: 0 }; + pub const $si_static: ast::Ident = ast::Ident { + name: ast::Name($si_name), + ctxt: 0, + }; )* } pub mod special_names { - use ast::Name; - $( #[allow(non_uppercase_statics)] pub const $si_static: Name = Name($si_name); )* + use ast; + $( + #[allow(non_uppercase_statics)] + pub const $si_static: ast::Name = ast::Name($si_name); + )* } /** @@ -402,7 +446,7 @@ macro_rules! declare_special_idents_and_keywords {( * the language and may not appear as identifiers. */ pub mod keywords { - use ast::Name; + use ast; pub enum Keyword { $( $sk_variant, )* @@ -410,10 +454,10 @@ macro_rules! declare_special_idents_and_keywords {( } impl Keyword { - pub fn to_name(&self) -> Name { + pub fn to_name(&self) -> ast::Name { match *self { - $( $sk_variant => Name($sk_name), )* - $( $rk_variant => Name($rk_name), )* + $( $sk_variant => ast::Name($sk_name), )* + $( $rk_variant => ast::Name($rk_name), )* } } } @@ -432,9 +476,9 @@ macro_rules! 
declare_special_idents_and_keywords {( }} // If the special idents get renumbered, remember to modify these two as appropriate -pub const SELF_KEYWORD_NAME: Name = Name(SELF_KEYWORD_NAME_NUM); -const STATIC_KEYWORD_NAME: Name = Name(STATIC_KEYWORD_NAME_NUM); -const SUPER_KEYWORD_NAME: Name = Name(SUPER_KEYWORD_NAME_NUM); +pub const SELF_KEYWORD_NAME: ast::Name = ast::Name(SELF_KEYWORD_NAME_NUM); +const STATIC_KEYWORD_NAME: ast::Name = ast::Name(STATIC_KEYWORD_NAME_NUM); +const SUPER_KEYWORD_NAME: ast::Name = ast::Name(SUPER_KEYWORD_NAME_NUM); pub const SELF_KEYWORD_NAME_NUM: u32 = 1; const STATIC_KEYWORD_NAME_NUM: u32 = 2; @@ -531,27 +575,27 @@ declare_special_idents_and_keywords! { * operator */ pub fn token_to_binop(tok: &Token) -> Option { - match *tok { - BINOP(STAR) => Some(ast::BiMul), - BINOP(SLASH) => Some(ast::BiDiv), - BINOP(PERCENT) => Some(ast::BiRem), - BINOP(PLUS) => Some(ast::BiAdd), - BINOP(MINUS) => Some(ast::BiSub), - BINOP(SHL) => Some(ast::BiShl), - BINOP(SHR) => Some(ast::BiShr), - BINOP(AND) => Some(ast::BiBitAnd), - BINOP(CARET) => Some(ast::BiBitXor), - BINOP(OR) => Some(ast::BiBitOr), - LT => Some(ast::BiLt), - LE => Some(ast::BiLe), - GE => Some(ast::BiGe), - GT => Some(ast::BiGt), - EQEQ => Some(ast::BiEq), - NE => Some(ast::BiNe), - ANDAND => Some(ast::BiAnd), - OROR => Some(ast::BiOr), - _ => None - } + match *tok { + BinOp(Star) => Some(ast::BiMul), + BinOp(Slash) => Some(ast::BiDiv), + BinOp(Percent) => Some(ast::BiRem), + BinOp(Plus) => Some(ast::BiAdd), + BinOp(Minus) => Some(ast::BiSub), + BinOp(Shl) => Some(ast::BiShl), + BinOp(Shr) => Some(ast::BiShr), + BinOp(And) => Some(ast::BiBitAnd), + BinOp(Caret) => Some(ast::BiBitXor), + BinOp(Or) => Some(ast::BiBitOr), + Lt => Some(ast::BiLt), + Le => Some(ast::BiLe), + Ge => Some(ast::BiGe), + Gt => Some(ast::BiGt), + EqEq => Some(ast::BiEq), + Ne => Some(ast::BiNe), + AndAnd => Some(ast::BiAnd), + OrOr => Some(ast::BiOr), + _ => None + } } // looks like we can get rid of this 
completely... @@ -646,7 +690,7 @@ impl, E> Encodable for InternedString { /// Returns the string contents of a name, using the task-local interner. #[inline] -pub fn get_name(name: Name) -> InternedString { +pub fn get_name(name: ast::Name) -> InternedString { let interner = get_ident_interner(); InternedString::new_from_rc_str(interner.get(name)) } @@ -654,7 +698,7 @@ pub fn get_name(name: Name) -> InternedString { /// Returns the string contents of an identifier, using the task-local /// interner. #[inline] -pub fn get_ident(ident: Ident) -> InternedString { +pub fn get_ident(ident: ast::Ident) -> InternedString { get_name(ident.name) } @@ -667,32 +711,32 @@ pub fn intern_and_get_ident(s: &str) -> InternedString { /// Maps a string to its interned representation. #[inline] -pub fn intern(s: &str) -> Name { +pub fn intern(s: &str) -> ast::Name { get_ident_interner().intern(s) } /// gensym's a new uint, using the current interner. #[inline] -pub fn gensym(s: &str) -> Name { +pub fn gensym(s: &str) -> ast::Name { get_ident_interner().gensym(s) } /// Maps a string to an identifier with an empty syntax context. #[inline] -pub fn str_to_ident(s: &str) -> Ident { - Ident::new(intern(s)) +pub fn str_to_ident(s: &str) -> ast::Ident { + ast::Ident::new(intern(s)) } /// Maps a string to a gensym'ed identifier. #[inline] -pub fn gensym_ident(s: &str) -> Ident { - Ident::new(gensym(s)) +pub fn gensym_ident(s: &str) -> ast::Ident { + ast::Ident::new(gensym(s)) } // create a fresh name that maps to the same string as the old one. // note that this guarantees that str_ptr_eq(ident_to_string(src),interner_get(fresh_name(src))); // that is, that the new name and the old one are connected to ptr_eq strings. -pub fn fresh_name(src: &Ident) -> Name { +pub fn fresh_name(src: &ast::Ident) -> ast::Name { let interner = get_ident_interner(); interner.gensym_copy(src.name) // following: debug version. 
Could work in final except that it's incompatible with @@ -703,7 +747,7 @@ pub fn fresh_name(src: &Ident) -> Name { } // create a fresh mark. -pub fn fresh_mark() -> Mrk { +pub fn fresh_mark() -> ast::Mrk { gensym("mark").uint() as u32 } @@ -711,14 +755,14 @@ pub fn fresh_mark() -> Mrk { pub fn is_keyword(kw: keywords::Keyword, tok: &Token) -> bool { match *tok { - token::IDENT(sid, false) => { kw.to_name() == sid.name } + Ident(sid, false) => { kw.to_name() == sid.name } _ => { false } } } pub fn is_any_keyword(tok: &Token) -> bool { match *tok { - token::IDENT(sid, false) => { + Ident(sid, false) => { let n = sid.name; n == SELF_KEYWORD_NAME @@ -733,7 +777,7 @@ pub fn is_any_keyword(tok: &Token) -> bool { pub fn is_strict_keyword(tok: &Token) -> bool { match *tok { - token::IDENT(sid, false) => { + Ident(sid, false) => { let n = sid.name; n == SELF_KEYWORD_NAME @@ -742,7 +786,7 @@ pub fn is_strict_keyword(tok: &Token) -> bool { || STRICT_KEYWORD_START <= n && n <= STRICT_KEYWORD_FINAL }, - token::IDENT(sid, true) => { + Ident(sid, true) => { let n = sid.name; n != SELF_KEYWORD_NAME @@ -756,7 +800,7 @@ pub fn is_strict_keyword(tok: &Token) -> bool { pub fn is_reserved_keyword(tok: &Token) -> bool { match *tok { - token::IDENT(sid, false) => { + Ident(sid, false) => { let n = sid.name; RESERVED_KEYWORD_START <= n @@ -768,7 +812,7 @@ pub fn is_reserved_keyword(tok: &Token) -> bool { pub fn mtwt_token_eq(t1 : &Token, t2 : &Token) -> bool { match (t1,t2) { - (&IDENT(id1,_),&IDENT(id2,_)) | (&LIFETIME(id1),&LIFETIME(id2)) => + (&Ident(id1,_),&Ident(id2,_)) | (&Lifetime(id1),&Lifetime(id2)) => mtwt::resolve(id1) == mtwt::resolve(id2), _ => *t1 == *t2 } @@ -786,9 +830,9 @@ mod test { } #[test] fn mtwt_token_eq_test() { - assert!(mtwt_token_eq(>,>)); + assert!(mtwt_token_eq(&Gt,&Gt)); let a = str_to_ident("bac"); let a1 = mark_ident(a,92); - assert!(mtwt_token_eq(&IDENT(a,true),&IDENT(a1,false))); + assert!(mtwt_token_eq(&Ident(a,true),&Ident(a1,false))); } } diff --git 
a/src/libsyntax/print/pprust.rs b/src/libsyntax/print/pprust.rs index 69e6d78d16a57..25ef8700ed005 100644 --- a/src/libsyntax/print/pprust.rs +++ b/src/libsyntax/print/pprust.rs @@ -1035,7 +1035,7 @@ impl<'a> State<'a> { ast::TtToken(_, ref tk) => { try!(word(&mut self.s, parse::token::to_string(tk).as_slice())); match *tk { - parse::token::DOC_COMMENT(..) => { + parse::token::DocComment(..) => { hardbreak(&mut self.s) } _ => Ok(()) diff --git a/src/test/auxiliary/roman_numerals.rs b/src/test/auxiliary/roman_numerals.rs index 40ed3a35ddf13..519f32fc248bd 100644 --- a/src/test/auxiliary/roman_numerals.rs +++ b/src/test/auxiliary/roman_numerals.rs @@ -17,7 +17,7 @@ extern crate syntax; extern crate rustc; use syntax::codemap::Span; -use syntax::parse::token::{IDENT, get_ident}; +use syntax::parse::token; use syntax::ast::{TokenTree, TtToken}; use syntax::ext::base::{ExtCtxt, MacResult, DummyResult, MacExpr}; use syntax::ext::build::AstBuilder; // trait for expr_uint @@ -39,7 +39,7 @@ fn expand_rn(cx: &mut ExtCtxt, sp: Span, args: &[TokenTree]) ("I", 1)]; let text = match args { - [TtToken(_, IDENT(s, _))] => get_ident(s).to_string(), + [TtToken(_, token::Ident(s, _))] => token::get_ident(s).to_string(), _ => { cx.span_err(sp, "argument should be a single identifier"); return DummyResult::any(sp); diff --git a/src/test/compile-fail/removed-syntax-record.rs b/src/test/compile-fail/removed-syntax-record.rs index 38c5122c2c3f4..b3fa04d8025bd 100644 --- a/src/test/compile-fail/removed-syntax-record.rs +++ b/src/test/compile-fail/removed-syntax-record.rs @@ -8,4 +8,4 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -type t = { f: () }; //~ ERROR expected type, found token LBRACE +type t = { f: () }; //~ ERROR expected type, found token LBrace