Simplify pgen2/driver, remove dependency on logging module.

This introduces a couple more regtest.sh diffs, one in
logging/__init__.pyc.

Ran the OSH unit tests under OPy to be sure.
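In effect, the convert callback moves out of the Driver constructor and into parse_tokens, and the logger argument goes away entirely. A minimal sketch of the resulting call pattern, assuming gr is an already-loaded pgen2 Grammar and py2st is the tuple-to-AST converter used by opy_main.py (both names come from the diff below; the import path and file name are illustrative):

from pgen2 import driver, tokenize  # exact import path is an assumption

py_path = 'example.py'  # any Python source file
dr = driver.Driver(gr)  # no convert= or logger= at construction anymore

with open(py_path) as f:
    tokens = tokenize.generate_tokens(f.readline)
    # convert is now passed per call, next to the start symbol;
    # opy_main.py refers to the start symbol via a FILE_INPUT constant.
    tree = dr.parse_tokens(tokens, convert=py2st,
                           start_symbol=gr.symbol2number['file_input'])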
Andy Chu committed Mar 20, 2018
1 parent cd1e3c4 commit e67796b5a8cbbfb7e14290b8228e51127ec27d77
Showing with 23 additions and 19 deletions.
  1. +1 −0 opy/compiler2/pycodegen.py
  2. +5 −3 opy/opy_main.py
  3. +17 −16 opy/pgen2/driver.py

opy/compiler2/pycodegen.py
@@ -1329,6 +1329,7 @@ def Finish(self):
        self.emit('LOAD_CONST', None)
        self.emit('RETURN_VALUE')

class LambdaCodeGenerator(_FunctionCodeGenerator):
    def _Start(self):

opy/opy_main.py
@@ -185,7 +185,7 @@ def OpyCommandMain(argv):
symbols = None
tr = None
dr = driver.Driver(gr, convert=py2st)
dr = driver.Driver(gr)
if action == 'pgen2':
grammar_path = argv[1]
@@ -218,7 +218,7 @@ def OpyCommandMain(argv):
py_path = argv[1]
with open(py_path) as f:
tokens = tokenize.generate_tokens(f.readline)
tree = dr.parse_tokens(tokens, start_symbol=FILE_INPUT)
tree = dr.parse_tokens(tokens, convert=py2st, start_symbol=FILE_INPUT)
if isinstance(tree, tuple):
n = CountTupleTree(tree)
@@ -236,7 +236,7 @@ def OpyCommandMain(argv):
with open(py_path) as f:
tokens = tokenize.generate_tokens(f.readline)
parse_tree = dr.parse_tokens(tokens, start_symbol=FILE_INPUT)
parse_tree = dr.parse_tokens(tokens, convert=py2st, start_symbol=FILE_INPUT)
as_tree = tr.transform(parse_tree)
co = pycodegen.compile(as_tree, py_path, 'exec')
log("Compiled to %d bytes of bytecode", len(co.co_code))
@@ -252,6 +252,7 @@ def OpyCommandMain(argv):
f = cStringIO.StringIO(py_expr)
tokens = tokenize.generate_tokens(f.readline)
parse_tree = dr.parse_tokens(tokens,
convert=py2st,
start_symbol=gr.symbol2number['eval_input'])
as_tree = tr.transform(parse_tree)
co = pycodegen.compile(as_tree, '<eval input>', 'eval')
@@ -270,6 +271,7 @@ def OpyCommandMain(argv):
tokens = tokenize.generate_tokens(f.readline)
# TODO: change this to 'single input'? Why doesn't this work?
parse_tree = dr.parse_tokens(tokens,
convert=py2st,
start_symbol=gr.symbol2number['eval_input'])
as_tree = tr.transform(parse_tree)
co = pycodegen.compile(as_tree, '<REPL input>', 'single')

opy/pgen2/driver.py
@@ -4,38 +4,40 @@
# Modifications:
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

from __future__ import print_function

"""Parser driver.

This provides a high-level interface to parse a file into a syntax tree.
A high-level interface to parse a file into a syntax tree.
"""

__author__ = "Guido van Rossum <guido@python.org>"

__all__ = ["Driver"]

# Python imports
import logging
import sys

# Pgen imports
from . import grammar, parse, token, tokenize


def log(msg, *args):
    if args:
        msg = msg % args
    print(msg, file=sys.stderr)


class Driver(object):

    def __init__(self, grammar, convert=None, logger=None):
    def __init__(self, grammar):
        self.grammar = grammar
        if logger is None:
            logger = logging.getLogger()
        self.logger = logger
        self.convert = convert

    def parse_tokens(self, tokens, start_symbol=None, debug=False):
    def parse_tokens(self, tokens, start_symbol=None, convert=None, debug=False):
        """Parse a series of tokens and return the syntax tree."""
        # XXX Move the prefix computation into a wrapper around tokenize.
        p = parse.Parser(self.grammar, self.convert)
        p = parse.Parser(self.grammar, convert=convert)
        p.setup(start=start_symbol)

        # What is all this for?
        lineno = 1
        column = 0
        type_ = value = start = end = line_text = None

@@ -62,11 +64,10 @@ def parse_tokens(self, tokens, start_symbol=None, debug=False):
            if type_ == token.OP:
                type_ = grammar.opmap[value]
            if debug:
                self.logger.debug("%s %r (prefix=%r)",
                                  token.tok_name[type_], value, prefix)
                log("%s %r (prefix=%r)", token.tok_name[type_], value, prefix)
            if p.addtoken(type_, value, (prefix, start)):
                if debug:
                    self.logger.debug("Stop.")
                log("Stop.")
                break
            prefix = ""
            lineno, column = end
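
Since the per-token trace now goes through the module-level log() helper instead of self.logger.debug(), a caller no longer has to configure the logging module to see it; passing debug=True prints the trace straight to stderr. A small usage sketch, reusing gr, py2st, and py_path from the sketch above (illustrative, not part of the commit):

with open(py_path) as f:
    tokens = tokenize.generate_tokens(f.readline)
    # With debug=True, each token (and a final "Stop.") is printed to stderr
    # via log(); previously this went to a logging.Logger at DEBUG level,
    # which the root logger suppresses by default.
    tree = dr.parse_tokens(tokens, convert=py2st, debug=True,
                           start_symbol=gr.symbol2number['file_input'])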
