Skip to content

Commit

Permalink
mtest: unify parsed and non-parsed output handling
Browse files Browse the repository at this point in the history
Use the same routine to handle output whether or not it is being parsed.
This also fixes the broken stderr handling for parsed tests.
  • Loading branch information
trhd authored and nirbheek committed Aug 8, 2022
1 parent e51a817 commit d05b128
Show file tree
Hide file tree
Showing 4 changed files with 97 additions and 25 deletions.
38 changes: 13 additions & 25 deletions mesonbuild/mtest.py
Expand Up @@ -1108,7 +1108,9 @@ def decode(stream: T.Union[None, bytes]) -> str:
except UnicodeDecodeError:
return stream.decode('iso-8859-1', errors='ignore')

async def read_decode(reader: asyncio.StreamReader, console_mode: ConsoleUser) -> str:
async def read_decode(reader: asyncio.StreamReader,
queue: T.Optional['asyncio.Queue[T.Optional[str]]'],
console_mode: ConsoleUser) -> str:
stdo_lines = []
try:
while not reader.at_eof():
Expand All @@ -1124,26 +1126,14 @@ async def read_decode(reader: asyncio.StreamReader, console_mode: ConsoleUser) -
stdo_lines.append(line)
if console_mode is ConsoleUser.STDOUT:
print(line, end='', flush=True)
return ''.join(stdo_lines)
except asyncio.CancelledError:
return ''.join(stdo_lines)

# Extract lines out of the StreamReader. Print them
# along the way if requested, and at the end collect
# them all into a future.
async def read_decode_lines(reader: asyncio.StreamReader,
q: 'asyncio.Queue[T.Optional[str]]') -> str:
stdo_lines = []
try:
while not reader.at_eof():
line = decode(await reader.readline())
stdo_lines.append(line)
await q.put(line)
if queue:
await queue.put(line)
return ''.join(stdo_lines)
except asyncio.CancelledError:
return ''.join(stdo_lines)
finally:
await q.put(None)
if queue:
await queue.put(None)

def run_with_mono(fname: str) -> bool:
return fname.endswith('.exe') and not (is_windows() or is_cygwin())
Expand Down Expand Up @@ -1220,12 +1210,11 @@ def __init__(self, p: asyncio.subprocess.Process,
self.stde_task = None # type: T.Optional[asyncio.Future[str]]
self.postwait_fn = postwait_fn # type: T.Callable[[], None]
self.all_futures = [] # type: T.List[asyncio.Future]
self.queue = None # type: T.Optional[asyncio.Queue[T.Optional[str]]]

def stdout_lines(self) -> T.AsyncIterator[str]:
q = asyncio.Queue() # type: asyncio.Queue[T.Optional[str]]
decode_coro = read_decode_lines(self._process.stdout, q)
self.stdo_task = asyncio.ensure_future(decode_coro)
return queue_iter(q)
self.queue = asyncio.Queue()
return queue_iter(self.queue)

def communicate(self,
test: 'TestRun',
Expand All @@ -1234,12 +1223,12 @@ def communicate(self,
async def collect_stdo(test: 'TestRun',
reader: asyncio.StreamReader,
console_mode: ConsoleUser) -> None:
test.stdo = await read_decode(reader, console_mode)
test.stdo = await read_decode(reader, self.queue, console_mode)

async def collect_stde(test: 'TestRun',
reader: asyncio.StreamReader,
console_mode: ConsoleUser) -> None:
test.stde = await read_decode(reader, console_mode)
test.stde = await read_decode(reader, None, console_mode)

# asyncio.ensure_future ensures that printing can
# run in the background, even before it is awaited
Expand Down Expand Up @@ -1483,11 +1472,10 @@ async def _run_cmd(self, harness: 'TestHarness', cmd: T.List[str]) -> None:
if self.runobj.needs_parsing:
parse_coro = self.runobj.parse(harness, p.stdout_lines())
parse_task = asyncio.ensure_future(parse_coro)
stdo_task = stde_task = None
else:
stdo_task, stde_task = p.communicate(self.runobj, self.console_mode)
parse_task = None

stdo_task, stde_task = p.communicate(self.runobj, self.console_mode)
await p.wait(self.runobj)

if parse_task:
Expand Down
17 changes: 17 additions & 0 deletions test cases/common/253 long output/dumper.c
@@ -0,0 +1,17 @@
/* Test fixture that floods both standard streams with very long output.
 *
 * Writes 100000 numbered lines to stderr, then 100000 to stdout, framed as
 * a two-point TAP stream ("ok 1", "ok 2", trailing plan "1..2") so the same
 * binary can be run both as a plain exitcode test and with
 * protocol : 'tap'.  The companion unit test checks that every numbered
 * line survives into mtest's logs and console output.
 */
#include <stdio.h>

int main(void)
{
    /* Flood stderr first ... */
    for (int i = 0 ; i < 100000 ; i++)
        fprintf(stderr, "# Iteration %d to stderr\n", i + 1);

    printf("ok 1 - dumper to stderr\n");

    /* ... then flood stdout. */
    for (int i = 0 ; i < 100000 ; i++)
        fprintf(stdout, "# Iteration %d to stdout\n", i + 1);

    /* Second test point plus the TAP plan at the end. */
    printf("ok 2 - dumper to stdout\n1..2\n");

    return 0;
}

5 changes: 5 additions & 0 deletions test cases/common/253 long output/meson.build
@@ -0,0 +1,5 @@
# Regression test for mtest's output capture: the dumper binary emits
# 100000 numbered lines to each of stdout and stderr.
project('long-stderr', 'c')

dumper = executable('dumper', 'dumper.c')
# Run the same binary through both output-handling paths: the default
# exitcode protocol (output is only logged) and TAP (output is parsed).
test('dump-test', dumper)
test('dump-test-TAP', dumper, protocol : 'tap')
62 changes: 62 additions & 0 deletions unittests/allplatformstests.py
Expand Up @@ -577,6 +577,68 @@ def test_verbose(self):
out = self._run(self.mtest_command + ['--suite', 'verbose'])
self.assertIn('1/1 subtest 1', out)

def test_long_output(self):
    """Check that very long test output (100000 lines to each of stdout
    and stderr) is captured completely, both in testlog.txt and on the
    console when running verbosely.

    The '253 long output' project's dumper binary prints numbered
    '# Iteration N to <stream>' lines; every one of them must survive
    into the logs with none dropped or truncated.
    """
    def assert_stream_counted(lines, stream, stop_early):
        # Count the numbered lines for *stream* in order.  With
        # stop_early, stop consuming as soon as all 100000 are found so
        # a subsequent scan can continue on the same iterator.
        i = 1
        for line in lines:
            if f'# Iteration {i} to {stream}' in line:
                i += 1
                if stop_early and i > 100000:
                    break
        self.assertEqual(i, 100001)

    def assert_testlog_complete():
        # Scan testlog.txt sequentially: stdout lines first, then the
        # stderr scan resumes from the same position in the file.
        with open(os.path.join(self.logdir, 'testlog.txt'), encoding='utf-8') as f:
            lines = iter(f.readline, '')
            assert_stream_counted(lines, 'stdout', True)
            assert_stream_counted(lines, 'stderr', False)

    testdir = os.path.join(self.common_test_dir, '253 long output')
    self.init(testdir)
    self.build()
    self.run_tests()

    # Ensure lines are found from testlog.txt when not being verbose.
    assert_testlog_complete()

    # Ensure lines are found from both testlog.txt and console when being verbose.
    out = self._run(self.mtest_command + ['-v'])
    assert_testlog_complete()

    # On the console, the stderr scan restarts from the top because the
    # relative placement of the two streams is not guaranteed there.
    lines = out.split('\n')
    assert_stream_counted(iter(lines), 'stdout', True)
    assert_stream_counted(iter(lines), 'stderr', False)


def test_testsetups(self):
if not shutil.which('valgrind'):
raise SkipTest('Valgrind not installed.')
Expand Down

0 comments on commit d05b128

Please sign in to comment.