commit 80a1974
Now able to build oil.ovm with fastlex.c.
Fixed duplicate definitions, etc.
Also:
- Remove unnecessary IdName usages
- Fix test cases in lexer_gen_test.py
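
The "duplicate definitions" were link-time symbol collisions: native/libc.c and native/fastlex.c each defined a debug() helper and a methods[] table with external linkage, which breaks once both files are compiled into the single oil.ovm binary. A minimal sketch of the failure mode and the fix (a hypothetical two-file example, not the commit's code):

    // a.c -- first extension
    void debug(const char* fmt, ...) {}   // external linkage

    // b.c -- second extension, linked into the same binary
    void debug(const char* fmt, ...) {}   // link error: multiple definition of `debug'

    // The fix applied below: internal linkage, so each translation
    // unit keeps its own private copy.
    static void debug(const char* fmt, ...) {}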
@@ -25,9 +25,9 @@ def main(argv):
     # Hard-coded special cases for now.
-    if mod_name == 'libc':  # Our own module
+    if mod_name in ('libc', 'fastlex'):  # Our own modules
       # Relative to Python-2.7.13 dir
-      print '../native/libc.c'
+      print '../native/%s.c' % mod_name
     elif mod_name == 'math':
       print 'Modules/mathmodule.c'
@@ -138,7 +138,7 @@ readonly PREPROC_FLAGS=(
   -D Py_BUILD_CORE
 )
 
-readonly INCLUDE_PATHS=(-I . -I Include)
+readonly INCLUDE_PATHS=(-I . -I Include -I ../_build/gen)
 readonly CC=${CC:-cc}  # cc should be on POSIX systems
 
 # BASE_CFLAGS is copied by observation from what configure.ac does on my Ubuntu
@@ -150,7 +150,7 @@ readonly CC=${CC:-cc}  # cc should be on POSIX systems
 # - gcc 4.x and Clang need -fwrapv
 
 # TODO:
-# - -DNDEBUG is also passed.  What is that?
+# - -DNDEBUG is also passed.  That turns off asserts.  Do we want that?
 # - We should auto-detect the flags in configure, or simplify the source so it
 #   isn't necessary.  Python's configure.ac sometimes does it by compiling a test
 #   file; at other times it does it by grepping $CC --help.
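
The auto-detection this TODO mentions usually probes the compiler directly. A hedged sketch of what that could look like in this script — the cc_supports helper and detected_cflags variable are hypothetical, not part of the commit:

    # Hypothetical: detect a flag by compiling a trivial file, configure.ac-style.
    cc_supports() {
      local flag=$1
      echo 'int main(void) { return 0; }' > _detect.c
      "$CC" "$flag" -c -o _detect.o _detect.c 2>/dev/null
      local status=$?
      rm -f _detect.c _detect.o
      return $status
    }

    if cc_supports -fwrapv; then
      detected_cflags="$detected_cflags -fwrapv"
    fi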
@@ -17,7 +17,7 @@
 except ImportError:
   from benchmarks import fake_libc as libc
 
-from core.id_kind import BOOL_OPS, OperandType, Id, IdName
+from core.id_kind import BOOL_OPS, OperandType, Id
 from core import util
 from core import runtime
@@ -518,7 +518,6 @@ def Eval(self, node):
       raise NotImplementedError(arg_type)
 
-    #if node.id == Id.Node_BinaryExpr:
     if node.tag == bool_expr_e.BoolBinary:
       op_id = node.op_id
@@ -602,5 +601,4 @@ def Eval(self, node):
       raise NotImplementedError(op_id)
 
-    # We could have covered all node IDs
-    raise AssertionError(IdName(node.id))
+    raise AssertionError(node.tag)
@@ -12,7 +12,7 @@
 import re
 
 from core import util
-from core.id_kind import Id, IdName
+from core.id_kind import Id
 
 from osh import ast_ as ast
@@ -196,9 +196,6 @@ def PushHint(self, old_id, new_id):
     - precedence in [[, e.g. [[ (1 == 2) && (2 == 3) ]]
     - arrays: a=(1 2 3), a+=(4 5)
     """
-    old_s = IdName(old_id)
-    new_s = IdName(new_id)
-    #print('* Lexer.PushHint %s => %s' % (old_s, new_s))
     self.translation_stack.append((old_id, new_id))
 
   def _Read(self, lex_mode):
@@ -218,7 +215,6 @@ def _Read(self, lex_mode):
     if self.translation_stack:
       old_id, new_id = self.translation_stack[-1]  # top
       if t.id == old_id:
-        new_s = IdName(new_id)
         #print('==> TRANSLATING %s ==> %s' % (t, new_s))
         self.translation_stack.pop()
         #print(self.translation_stack)
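
For context: the translation stack is a one-shot lookahead hint from the parser to the lexer. PushHint records an (old_id, new_id) pair; the next time _Read produces a token whose id matches old_id, the lexer rewrites it to new_id and pops the hint. A standalone sketch of the idea (hypothetical class, condensed from the logic in these two hunks):

    class HintedLexer(object):
        """Sketch of the push-hint translation used by Lexer."""

        def __init__(self, tokens):
            self.tokens = iter(tokens)       # (id, value) pairs
            self.translation_stack = []

        def PushHint(self, old_id, new_id):
            # Parser tells the lexer: "the next old_id you emit is really new_id".
            self.translation_stack.append((old_id, new_id))

        def Read(self):
            tok_id, val = next(self.tokens)
            if self.translation_stack:
                old_id, new_id = self.translation_stack[-1]  # top
                if tok_id == old_id:
                    self.translation_stack.pop()
                    tok_id = new_id  # e.g. Op_RParen -> Right_ArrayLiteral
            return tok_id, val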
@@ -13,20 +13,20 @@ class LexerGenTest(unittest.TestCase):
 
   def testTranslateRegex(self):
     PAIRS = [
-        (r'a', r'a'),
+        (r'a', r'"a" '),
         (r'[a-z]', r'[a-z]'),
-        (r'[a-zA-Z.]+', r'[a-zA-Z.]+'),
-        (r'[a-zA-Z_][a-zA-Z0-9_]*\+?=', r'[a-zA-Z_][a-zA-Z0-9_]*\+?='),
+        (r'[a-zA-Z.]+', r'[a-zA-Z.]+ '),
+        (r'[a-zA-Z_][a-zA-Z0-9_]*\+?=', r'[a-zA-Z_][a-zA-Z0-9_]* "+" ? "=" '),
 
-        (r'[."]*', r'[.\"]*'),
-        (r'\$', r'\$'),
-        (r'.*', r'.*'),
+        (r'[."]*', r'[."]* '),
+        (r'\$', r'"$" '),
+        (r'.*', r'.* '),
 
         # Both of these accepted?
-        ('\0', r'\000'),
-        (r'\0', r'\000'),
-        (r'\\', r'\\'),
-        (r'[\\]', r'\\'),
+        ('\0', r'"\x00" '),
+        (r'\0', r'"\x00" '),
+        (r'\\', r'"\\" '),
+        (r'[\\]', r'"\\" '),
 
         (r'.', r'.'),
         (r'[^a]', r'[^a]'),
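
The updated expected strings on the right encode re2c syntax: bare literals become quoted strings ("a" ), character classes pass through, operators like + and ? sit outside the quotes, and NUL becomes "\x00". A hedged sketch of exercising the translator directly — the TranslateRegex name is inferred from the test name, and the import path is hypothetical:

    import unittest

    from osh.lexer_gen import TranslateRegex  # hypothetical import path

    class TranslateRegexSketch(unittest.TestCase):

      def testLiteralAndOperators(self):
        # A bare literal becomes a quoted re2c literal.
        self.assertEqual(r'"a" ', TranslateRegex(r'a'))
        # Operators stay outside the quotes: \+?=  ->  "+" ? "="
        self.assertEqual(r'[a-zA-Z_][a-zA-Z0-9_]* "+" ? "=" ',
                         TranslateRegex(r'[a-zA-Z_][a-zA-Z0-9_]*\+?='))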
@@ -3,7 +3,7 @@
 tdop.py - Library for expression parsing.
 """
 
-from core.id_kind import Id, IdName
+from core.id_kind import Id
 from core import word
 from core import util
@@ -241,9 +241,8 @@ def AtToken(self, token_type):
   def Eat(self, token_type):
     """ Eat()? """
     if not self.AtToken(token_type):
-      t = IdName(token_type)
-      p_die('Parser expected %s, got %s', t, self.cur_word, word=self.cur_word)
+      p_die('Parser expected %s, got %s', token_type, self.cur_word,
+            word=self.cur_word)
     self.Next()
 
   def Next(self):
@@ -106,9 +106,6 @@ def StaticEval(w):
 def LeftMostSpanForPart(part):
   # TODO: Write unit tests in ui.py for error values
 
-  #from core.id_kind import IdName
-  #print(IdName(part.id))
-
   if part.tag == word_part_e.ArrayLiteralPart:
     if not part.words:
       return -1
@@ -164,9 +161,6 @@ def LeftMostSpanForPart(part):
 def _RightMostSpanForPart(part):
   # TODO: Write unit tests in ui.py for error values
 
-  #from core.id_kind import IdName
-  #print(IdName(part.id))
-
   if part.tag == word_part_e.ArrayLiteralPart:
     # TODO: Return )
     return LeftMostSpanForWord(part.words[0])  # Hm this is a=(1 2 3)
@@ -9,7 +9,7 @@
 from core import braces
 from core import expr_eval
 from core import glob_
-from core.id_kind import Id, Kind, IdName, LookupKind
+from core.id_kind import Id, Kind, LookupKind
 from core import runtime
 from core import state
 from core import util
@@ -14,15 +14,15 @@
 
 // TODO: Should this be shared among all extensions?
 // Log messages to stderr.
-void debug(const char* fmt, ...) {
-#if 1
+#if 0
+static void debug(const char* fmt, ...) {
   va_list args;
   va_start(args, fmt);
   vfprintf(stderr, fmt, args);
   va_end(args);
   fprintf(stderr, "\n");
-#endif
 }
+#endif
 
 static PyObject *
 fastlex_MatchToken(PyObject *self, PyObject *args) {
@@ -60,7 +60,7 @@ fastlex_MatchToken(PyObject *self, PyObject *args) {
-// SlowTokenMatcher
+// FastTokenMatcher
 
-PyMethodDef methods[] = {
+static PyMethodDef methods[] = {
   {"MatchToken", fastlex_MatchToken, METH_VARARGS,
    "(lexer mode, line, start_pos) -> (id, end_pos)."},
   {NULL, NULL},
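
static matters here for the same duplicate-definition reason: libc.c defines its own methods[], and both tables now live in one binary. For reference, the surrounding boilerplate of a CPython 2.7 extension looks roughly like this (a generic sketch with made-up names, not this commit's code):

    #include <Python.h>

    static PyObject *
    example_noop(PyObject *self, PyObject *args) {
      Py_RETURN_NONE;  // placeholder function body
    }

    static PyMethodDef methods[] = {  // internal linkage: no symbol clash
      {"noop", example_noop, METH_VARARGS, "Do nothing."},
      {NULL, NULL},
    };

    PyMODINIT_FUNC
    initexample(void) {  // Python 2 entry point is init<modulename>
      Py_InitModule("example", methods);
    }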
@@ -16,7 +16,7 @@
 #include <Python.h>
 
 // Log messages to stderr.
-void debug(const char* fmt, ...) {
+static void debug(const char* fmt, ...) {
 #ifdef LIBC_VERBOSE
   va_list args;
   va_start(args, fmt);
@@ -261,7 +261,7 @@ func_regex_match(PyObject *self, PyObject *args) {
   }
 }
 
-PyMethodDef methods[] = {
+static PyMethodDef methods[] = {
   {"fnmatch", func_fnmatch, METH_VARARGS,
    "Return whether a string matches a pattern."},
   // Python's glob doesn't have char classes
@@ -37,7 +37,7 @@
 from osh import ast_ as ast
 
 from core import word
-from core.id_kind import Id, Kind, LookupKind, IdName
+from core.id_kind import Id, Kind, LookupKind
 from core import util
 
 try:
@@ -205,7 +205,6 @@ def ParseFactor(self):
           | WORD BINARY_OP WORD
           | '(' Expr ')'
     """
-    #print('ParseFactor %s %s' % (self.b_kind, IdName(self.op_id)))
     if self.b_kind == Kind.BoolUnary:
       # Just save the type and not the token itself?
       op = self.op_id
@@ -9,7 +9,7 @@
 word_parse.py - Parse the shell word language.
 """
 
-from core.id_kind import Id, Kind, IdName, LookupKind
+from core.id_kind import Id, Kind, LookupKind
 from core import braces
 from core import word
 from core import tdop
@@ -12,7 +12,7 @@
 import unittest
 
 from core import alloc
-from core.id_kind import Id, IdName
+from core.id_kind import Id
 from core import word
 from core.test_lib import TokenWordsEqual