
Add a new multiline format to sh_spec.py.

Instead of:

    # stdout-json: "one\ntwo\n"

we can now write:

    ## STDOUT:
    one
    two
    ## END

Use the new multiline format in {smoke,xtrace}.test.sh.
Andy Chu committed Dec 28, 2017
1 parent 4bfa951 commit b43c7382565e9a6b035dd42f96fb3614f3e54d3f
Showing with 191 additions and 56 deletions.
  1. +11 −4 spec/smoke.test.sh
  2. +26 −12 spec/xtrace.test.sh
  3. +100 −27 test/sh_spec.py
  4. +54 −13 test/sh_spec_test.py
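Both spellings of the expected output in the commit message above describe the same bytes: the lines between "## STDOUT:" and "## END" are joined with newlines and a trailing newline is appended, which is exactly the string the old stdout-json value encoded as JSON. A minimal sketch in plain Python (not part of the commit):

    # Lines of the new multiline block...
    value_lines = ['one', 'two']
    multiline_value = '\n'.join(value_lines) + '\n'

    # ...collapse to the same string the old single-line form spelled out.
    assert multiline_value == "one\ntwo\n"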
spec/smoke.test.sh
@@ -83,16 +83,23 @@ ls /nonexistent
### subshell
(echo 1; echo 2)
# stdout-json: "1\n2\n"
# status: 0
## status: 0
## STDOUT:
1
2
## END
### for loop
for i in a b c
do
echo $i
done
# stdout-json: "a\nb\nc\n"
# status: 0
## status: 0
# STDOUT:
a
b
c
## END
### vars
a=5
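As a quick sanity check of the converted "subshell" case outside the spec framework (this script is not part of the commit and assumes bash is on PATH):

    import subprocess

    # Run the case's code directly and compare against the new '## STDOUT:'
    # block ('1', '2') and '## status: 0'.
    p = subprocess.run(['bash', '-c', '(echo 1; echo 2)'],
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert p.stdout == b'1\n2\n'
    assert p.returncode == 0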
spec/xtrace.test.sh
@@ -1,22 +1,30 @@
#!/usr/bin/env bash
#
# xtrace test. Test PS4 and line numbers, etc.
#
# TODO: need multiline test format
### basic xtrace
set -x
echo one >&2
echo two >&2
# stdout-json: ""
# stderr-json: "+ echo one\none\n+ echo two\ntwo\n"
### xtrace
echo 1
set -o xtrace
echo 2
# stdout-json: "1\n2\n"
# stderr: + echo 2
## STDOUT:
1
2
## END
## STDERR:
+ echo 2
## END
### xtrace written before command executes
set -x
echo one >&2
echo two >&2
## stdout-json: ""
## STDERR:
+ echo one
one
+ echo two
two
## END
### PS4 is scoped
set -x
@@ -27,4 +35,10 @@ f() {
}
f
echo two
# stderr-json: "+ echo one\n+ f\n+ local 'PS4=- '\n- echo func\n+ echo two\n"
## STDERR:
+ echo one
+ f
+ local 'PS4=- '
- echo func
+ echo two
## END
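The removed stderr-json line and the new "## STDERR:" block for the "PS4 is scoped" case encode the same string; the multiline form just avoids packing the \n escapes and the quoted 'PS4=- ' into a single JSON literal. A standalone check (not part of the commit):

    # Value of the old '# stderr-json: ...' line, after JSON decoding.
    old = "+ echo one\n+ f\n+ local 'PS4=- '\n- echo func\n+ echo two\n"

    # Lines of the new '## STDERR:' ... '## END' block.
    block = [
        "+ echo one",
        "+ f",
        "+ local 'PS4=- '",
        "- echo func",
        "+ echo two",
    ]
    assert '\n'.join(block) + '\n' == old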
test/sh_spec.py
@@ -34,6 +34,22 @@
it will be a BUG.
If one shell disagrees with others, that is generally a BUG.
Example test case:
### hello and fail
echo hello
echo world
exit 1
## status: 1
#
# ignored comment
#
## STDOUT:
hello
world
## END
"""
import collections
@@ -58,29 +74,31 @@ def log(msg, *args):
print(msg, file=sys.stderr)
# Example:
# EXAMPLES:
# stdout: foo
#
# TODO: Also support
# mksh: status: 2
# bash/mksh status: 2
# bash/mksh stdout: hi there
# stdout-json: ""
#
# In other words, it could be (name, value) or (qualifier, name, value)
KEY_VALUE_RE = re.compile(r'''
[#] \s+
[#][#]? \s+
(?: (OK|BUG|N-I) \s+ ([\w+/]+) \s+ )? # optional prefix
([\w\-]+) # key
:
\s* (.*) # value
''', re.VERBOSE)
END_MULTILINE_RE = re.compile(r'''
[#][#]? \s+ END
''', re.VERBOSE)
# Line types
TEST_CASE_BEGIN = 0 # Starts with ###
KEY_VALUE = 1 # Metadata
CODE = 2 # Unquoted
EOF = 3
KEY_VALUE_MULTILINE = 2 # STDOUT STDERR
END_MULTILINE = 3 # STDOUT STDERR
PLAIN_LINE = 4 # Uncommented
EOF = 5
def LineIter(f):
@@ -102,7 +120,17 @@ def LineIter(f):
# HACK: Expected data should have the newline.
if name in ('stdout', 'stderr'):
value += '\n'
yield line_num, KEY_VALUE, (qualifier, shells, name, value)
if name in ('STDOUT', 'STDERR'):
token_type = KEY_VALUE_MULTILINE
else:
token_type = KEY_VALUE
yield line_num, token_type, (qualifier, shells, name, value)
continue
m = END_MULTILINE_RE.match(line)
if m:
yield line_num, END_MULTILINE, None
continue
if line.lstrip().startswith('#'):
@@ -113,13 +141,14 @@ def LineIter(f):
# Non-empty line that doesn't start with '#'
# NOTE: We need the original line to test the whitespace sensitive <<-.
# And we need rstrip because we add newlines back below.
yield line_num, CODE, line.rstrip()
yield line_num, PLAIN_LINE, line.rstrip()
yield line_num, EOF, None
class Tokenizer(object):
"""Wrap a token iterator in a Tokenizer interface."""
def __init__(self, it):
self.it = it
self.cursor = None
@@ -148,38 +177,80 @@ def peek(self):
#
# -- Should be a blank line after each test case. Leading comments and code
# -- are OK.
# test_file = (COMMENT | CODE)* (test_case '\n')*
# test_file = (COMMENT | PLAIN_LINE)* (test_case '\n')*
def AddMetadataToCase(case, qualifier, shells, name, value):
shells = shells.split('/') # bash/dash/mksh
for shell in shells:
if shell not in case:
case[shell] = {}
case[shell][name] = value
case[shell]['qualifier'] = qualifier
def ParseKeyValue(tokens, case):
"""Parse commented-out metadata in a test case."""
"""Parse commented-out metadata in a test case.
The metadata must be contiguous.
Args:
tokens: Tokenizer
case: dictionary to add to
"""
while True:
_, kind, item = tokens.peek()
if kind != KEY_VALUE:
line_num, kind, item = tokens.peek()
if kind == KEY_VALUE_MULTILINE:
qualifier, shells, name, empty_value = item
print('item', item)
if empty_value:
raise ParseError(
'Line %d: got value %r for %r, but the value should be on the '
'following lines' % (line_num, empty_value, name))
value_lines = []
while True:
tokens.next()
_, kind2, item2 = tokens.peek()
if kind2 != PLAIN_LINE:
break
value_lines.append(item2)
if kind2 != END_MULTILINE:
raise ParseError('Expected END token, got %r %r' % (kind2, item2))
value = '\n'.join(value_lines) + '\n'
name = name.lower() # STDOUT -> stdout
if qualifier:
AddMetadataToCase(case, qualifier, shells, name, value)
else:
case[name] = value
elif kind == KEY_VALUE:
qualifier, shells, name, value = item
if qualifier:
AddMetadataToCase(case, qualifier, shells, name, value)
else:
case[name] = value
else: # Unknown token type
break
qualifier, shells, name, value = item
if qualifier:
shells = shells.split('/') # bash/dash/mksh
for shell in shells:
if shell not in case:
case[shell] = {}
case[shell][name] = value
case[shell]['qualifier'] = qualifier
else:
case[name] = value
tokens.next()
def ParseCodeLines(tokens, case):
"""Parse uncommented code in a test case."""
_, kind, item = tokens.peek()
if kind != CODE:
if kind != PLAIN_LINE:
raise ParseError('Expected a line of code (got %r, %r)' % (kind, item))
code_lines = []
while True:
_, kind, item = tokens.peek()
if kind != CODE:
if kind != PLAIN_LINE:
case['code'] = '\n'.join(code_lines) + '\n'
return
code_lines.append(item)
@@ -202,7 +273,9 @@ def ParseTestCase(tokens):
#print case
ParseKeyValue(tokens, case)
#print 'KV1', case
# For broken code
if 'code' in case: # Got it through a key value pair
return case
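A standalone sketch of how the two patterns above classify lines; the regexes are copied from the diff, while the surrounding LineIter/ParseKeyValue dispatch is only paraphrased in the comments:

    import re

    # Copied from test/sh_spec.py above: '# key: value' or '## key: value',
    # with an optional 'OK|BUG|N-I <shells>' prefix.
    KEY_VALUE_RE = re.compile(r'''
       [#][#]? \s+
       (?: (OK|BUG|N-I) \s+ ([\w+/]+) \s+ )?  # optional prefix
       ([\w\-]+)                              # key
       :
       \s* (.*)                               # value
    ''', re.VERBOSE)

    # Also copied from above: terminates a multiline block.
    END_MULTILINE_RE = re.compile(r'''
        [#][#]? \s+ END
    ''', re.VERBOSE)

    # '## STDOUT:' matches KEY_VALUE_RE with an empty value; LineIter then
    # emits KEY_VALUE_MULTILINE, and ParseKeyValue collects the following
    # PLAIN_LINE tokens until it hits END_MULTILINE.
    print(KEY_VALUE_RE.match('## STDOUT:').groups())          # (None, None, 'STDOUT', '')
    print(KEY_VALUE_RE.match('## OK dash STDOUT:').groups())  # ('OK', 'dash', 'STDOUT', '')
    print(KEY_VALUE_RE.match('## status: 1').groups())        # (None, None, 'status', '1')
    print(bool(END_MULTILINE_RE.match('## END')))             # True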
test/sh_spec_test.py
@@ -9,7 +9,7 @@
from sh_spec import * # module under test
TEST = io.BytesIO("""\
TEST1 = io.BytesIO("""\
### Env binding in readonly/declare disallowed
FOO=foo readonly v=$(tests/printenv.py FOO)
echo "v=$v"
@@ -20,35 +20,76 @@
# status: 2
""")
TOKENS = list(LineIter(TEST))
CASE = ParseTestCase(Tokenizer(iter(TOKENS)))
TOKENS1 = list(LineIter(TEST1))
CASE1 = ParseTestCase(Tokenizer(iter(TOKENS1)))
class TestShTest(unittest.TestCase):
TEST2 = io.BytesIO("""\
### Multiline test case
echo one
echo two
# status: 1
# stderr-json: ""
# STDOUT:
one
two
# END
# OK dash STDOUT:
dash1
dash2
# END
""")
TOKENS2 = list(LineIter(TEST2))
CASE2 = ParseTestCase(Tokenizer(iter(TOKENS2)))
class ShSpecTest(unittest.TestCase):
def testLineIter(self):
pprint.pprint(TOKENS)
#pprint.pprint(TOKENS1)
types = [type_ for line_num, type_, value in TOKENS]
types = [type_ for line_num, type_, value in TOKENS1]
self.assertEqual(
[ TEST_CASE_BEGIN, CODE, CODE,
[ TEST_CASE_BEGIN, PLAIN_LINE, PLAIN_LINE,
KEY_VALUE, KEY_VALUE, KEY_VALUE,
EOF], types)
pprint.pprint(CASE)
#pprint.pprint(TOKENS2)
types2 = [type_ for line_num, type_, value in TOKENS2]
self.assertEqual(
[ TEST_CASE_BEGIN, PLAIN_LINE, PLAIN_LINE,
KEY_VALUE, KEY_VALUE,
KEY_VALUE_MULTILINE, PLAIN_LINE, PLAIN_LINE, END_MULTILINE,
KEY_VALUE_MULTILINE, PLAIN_LINE, PLAIN_LINE, END_MULTILINE,
EOF], types2)
def testParsed(self):
print('CASE1')
pprint.pprint(CASE1)
print()
expected = {'status': '0', 'stdout': 'v=None\n', 'qualifier': 'OK'}
self.assertEqual(expected, CASE['bash'])
self.assertEqual(expected, CASE['dash'])
self.assertEqual(expected, CASE['mksh'])
self.assertEqual(expected, CASE1['bash'])
self.assertEqual(expected, CASE1['dash'])
self.assertEqual(expected, CASE1['mksh'])
self.assertEqual('2', CASE1['status'])
self.assertEqual('Env binding in readonly/declare disallowed', CASE1['desc'])
print('CASE2')
pprint.pprint(CASE2)
print()
print(CreateAssertions(CASE2, 'bash'))
self.assertEqual('one\ntwo\n', CASE2['stdout'])
self.assertEqual({'qualifier': 'OK', 'stdout': 'dash1\ndash2\n'}, CASE2['dash'])
def testCreateAssertions(self):
print(CreateAssertions(CASE, 'bash'))
print(CreateAssertions(CASE1, 'bash'))
def testRunCases(self):
shells = [('bash', '/bin/bash'), ('osh', 'bin/osh')]
env = {}
out = AnsiOutput(sys.stdout, False)
RunCases([CASE], lambda i, case: True, shells, env, out)
RunCases([CASE1], lambda i, case: True, shells, env, out)
if __name__ == '__main__':
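For reference, the assertions in testParsed imply that TEST2 parses into roughly the following shape. This is illustrative only: the 'stdout' and 'dash' entries are asserted directly above, while the other keys are inferred from the fixture text and the parser changes in test/sh_spec.py:

    # Approximate contents of CASE2; not an exact dump.
    case2 = {
        'desc': 'Multiline test case',      # from the '### ...' header
        'code': 'echo one\necho two\n',     # the uncommented lines
        'status': '1',                      # from '# status: 1'
        'stdout': 'one\ntwo\n',             # from the '# STDOUT:' ... '# END' block
        'dash': {                           # from '# OK dash STDOUT:' ... '# END'
            'qualifier': 'OK',
            'stdout': 'dash1\ndash2\n',
        },
        # '# stderr-json: ""' is also stored, but its raw form isn't asserted above.
    }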
