Skip to content

Commit

Permalink
watchman: start building python based test suite
Browse files Browse the repository at this point in the history
Summary:
We've had a number of compatibility issues with the arcanist
based test suite, so this is an effort to run them using the
python unittest infrastructure.

This new structure allows us to create a single temporary dir
and to create a dir per test case to track the temporary files
and dirs created during the test.  This is beneficial both from
a post-mortem perspective if a test fails, and because
the paths that show up in the watchman logs will now be easily
recognizable as being associated with a given test.

This will also help us manage the windows integration tests
(#19 (comment))
a bit more sanely; a source of errors in the php tests is that
deleting directory trees can fail if a handle still references
any part of it, and there is often a noticeable lag where we
can hit this state and error out.  By deferring the deletes
until our process ends, we should minimize this issue.

I've ported a single integration test to demonstrate what this
looks like, the rest will have to be a series of diffs for
easier review.

Test Plan: `make integration` or `./runtests.py`

Reviewers: sid0

Differential Revision: https://reviews.facebook.net/D43137
  • Loading branch information
wez committed Jul 29, 2015
1 parent 5cc813a commit e7da42b
Show file tree
Hide file tree
Showing 7 changed files with 508 additions and 2 deletions.
8 changes: 6 additions & 2 deletions Makefile.am
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,10 @@ py-install:

# This is invoked via WatchmanIntegrationEngine
py-tests:
time $(PYTHON) $(TESTNAME)
$(PYTHON) $(TESTNAME)

py-integration:
$(PYTHON) runtests.py

py-clean:
-cd python && $(PYTHON) ./setup.py clean --all
Expand All @@ -118,6 +121,7 @@ py-build:
py-tests:
py-clean:
py-install:
py-integration:
endif

if HAVE_RUBY
Expand Down Expand Up @@ -150,7 +154,7 @@ clean-local: py-clean rb-clean
build-tests: $(TESTS)
.PHONY: lint build-tests integration py-tests
# run integration AND unit tests
integration:
integration: py-integration
arc test

tests_argv_t_CPPFLAGS = $(THIRDPARTY_CPPFLAGS)
Expand Down
Empty file added python/tests/__init__.py
Empty file.
100 changes: 100 additions & 0 deletions runtests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,100 @@
#!/usr/bin/env python
# vim:ts=4:sw=4:et:
import unittest
import os
import os.path
import sys
import tempfile
import shutil
import subprocess
import traceback
import time
import argparse
import atexit

parser = argparse.ArgumentParser(
    description="Run the watchman unit and integration tests")
# type=int is required: argparse yields strings by default, and a
# string verbosity breaks TextTestRunner's numeric comparisons
parser.add_argument('-v', '--verbosity', default=2, type=int,
                    help="test runner verbosity")
parser.add_argument(
    "--keep",
    action='store_true',
    help="preserve all temporary files created during test execution")
args = parser.parse_args()

# Ensure that we can find pywatchman and the test support code.
# NOTE: assumes we are invoked from the root of the watchman source
# tree (as `make integration` does)
sys.path.append(os.path.join(os.getcwd(), 'python'))
sys.path.append(os.path.join(os.getcwd(), 'tests/integration'))

# We test for this in a test case
os.environ['WATCHMAN_EMPTY_ENV_VAR'] = ''

# Install the SIGINT handler so that ctrl-c produces a summary
unittest.installHandler()

# We'll put all our temporary stuff under one dir so that we
# can clean it all up at the end
temp_dir = tempfile.mkdtemp(prefix='watchmantest')
if args.keep:
    atexit.register(sys.stdout.write,
                    'Preserving output in %s\n' % temp_dir)
else:
    atexit.register(shutil.rmtree, temp_dir)
# Redirect all temporary files to that location
tempfile.tempdir = temp_dir

# Start up a shared watchman instance for the tests.
# We defer the import until after we've modified the python path
import WatchmanInstance
inst = WatchmanInstance.Instance()
inst.start()

# Allow tests to locate our instance by default
os.environ['WATCHMAN_SOCK'] = inst.getSockPath()


class Result(unittest.TestResult):
    """TestResult that colorizes per-test status output.

    Each status line is green for pass, red for fail and yellow for
    skip, and includes the elapsed wall-clock time for the test.
    """

    def startTest(self, test):
        self.startTime = time.time()
        super(Result, self).startTest(test)

    def __elapsed(self):
        # Seconds since startTest() for the currently running test
        return time.time() - self.startTime

    def addSuccess(self, test):
        took = self.__elapsed()
        super(Result, self).addSuccess(test)
        print('\033[32mPASS\033[0m %s (%.3fs)' % (test.id(), took))

    def addSkip(self, test, reason):
        took = self.__elapsed()
        super(Result, self).addSkip(test, reason)
        print('\033[33mSKIP\033[0m %s (%.3fs) %s' %
              (test.id(), took, reason))

    def __printFail(self, test, err):
        # Shared formatter for both failures and errors
        took = self.__elapsed()
        klass, value, tb = err
        print('\033[31mFAIL\033[0m %s (%.3fs)\n%s' % (
            test.id(),
            took,
            ''.join(traceback.format_exception(klass, value, tb))))

    def addFailure(self, test, err):
        self.__printFail(test, err)
        super(Result, self).addFailure(test, err)

    def addError(self, test, err):
        self.__printFail(test, err)
        super(Result, self).addError(test, err)


loader = unittest.TestLoader()
suite = unittest.TestSuite()
for d in ['python/tests', 'tests/integration']:
    suite.addTests(loader.discover(d, top_level_dir=d))

result = unittest.TextTestRunner(
    resultclass=Result,
    verbosity=args.verbosity
).run(suite)

# Propagate the outcome in our exit status; previously we always
# exited 0, so `make integration` could not detect test failures.
sys.exit(0 if result.wasSuccessful() else 1)
68 changes: 68 additions & 0 deletions tests/integration/WatchmanInstance.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
# vim:ts=4:sw=4:et:
# Copyright 2012-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
import tempfile
import json
import os.path
import subprocess
import pywatchman
import time


class Instance(object):
    """Tracks a running watchman instance.

    The instance runs with its own sock/log/state files inside a
    private temporary directory, and with an overridden global
    configuration file; pass a dict as `config` to control the
    configuration contents.
    """

    def __init__(self, config=None):
        # Avoid a mutable default argument; treat None as "empty config"
        if config is None:
            config = {}
        self.base_dir = tempfile.mkdtemp(prefix='inst')
        self.cfg_file = os.path.join(self.base_dir, "config.json")
        self.log_file_name = os.path.join(self.base_dir, "log")
        self.sock_file = os.path.join(self.base_dir, "sock")
        self.state_file = os.path.join(self.base_dir, "state")
        with open(self.cfg_file, "w") as f:
            f.write(json.dumps(config))
        self.log_file = open(self.log_file_name, 'w+')
        # Initialize these so that stop() (and thus __del__) is safe
        # to call even if start() was never invoked or failed part way
        self.proc = None
        self.pid = None

    def __del__(self):
        self.stop()

    def getSockPath(self):
        """Returns the path to this instance's socket file."""
        return self.sock_file

    def stop(self):
        """Kill the server process (if running) and close our log handle.

        Safe to call more than once."""
        if self.proc:
            self.proc.kill()
            self.proc.wait()
            self.proc = None
        # file.close() is idempotent, so repeated stop() calls are fine
        self.log_file.close()

    def start(self):
        """Spawn the watchman process and wait for it to accept clients.

        Raises the last connection error if the server does not come
        up within roughly one second."""
        args = [
            './watchman',
            '--foreground',
            '--sockname={}'.format(self.sock_file),
            '--logfile={}'.format(self.log_file_name),
            '--statefile={}'.format(self.state_file),
            '--log-level=2',
        ]
        env = os.environ.copy()
        # Point the server at our private configuration file
        env["WATCHMAN_CONFIG_FILE"] = self.cfg_file
        self.proc = subprocess.Popen(args,
                                     env=env,
                                     stdin=None,
                                     stdout=self.log_file,
                                     stderr=self.log_file)

        # Wait for it to come up: the server needs a moment before it
        # accepts connections.  range() (not py2-only xrange) gives us
        # the intended 10 attempts rather than 9.
        last_err = None
        for _ in range(10):
            try:
                client = pywatchman.client(sockpath=self.sock_file)
                self.pid = client.query('get-pid')['pid']
                break
            except Exception as e:
                last_err = e
                time.sleep(0.1)

        if not self.pid:
            raise last_err or RuntimeError('watchman did not start')
112 changes: 112 additions & 0 deletions tests/integration/WatchmanTestCase.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,112 @@
# vim:ts=4:sw=4:et:
# Copyright 2012-present Facebook, Inc.
# Licensed under the Apache License, Version 2.0
import errno
import unittest
import pywatchman
import time
import tempfile
import os.path
import os


class WatchmanTestCase(unittest.TestCase):
    """Base class for watchman integration tests.

    Provides a lazily-created shared pywatchman client, per-test
    temporary directory redirection, and helpers for touching files
    and waiting on asynchronous conditions.
    """

    def getClient(self):
        """Lazily create and memoize a client connected to the
        watchman instance identified by $WATCHMAN_SOCK."""
        if not hasattr(self, 'client'):
            self.client = pywatchman.client()
        return self.client

    def __logTestInfo(self, test, msg):
        # Emit a marker into the watchman log so that server-side log
        # output can be correlated with the test that generated it.
        if hasattr(self, 'client'):
            try:
                self.getClient().query('log', 'debug',
                                       'TEST: %s %s\n\n' % (test, msg))
            except Exception:
                # Logging is best-effort; never fail a test over it
                pass

    def run(self, result=None):
        # Arrange for any temporary stuff we create to go under
        # our global tempdir and put it in a dir named for the test;
        # this makes test output easy to find post-mortem.
        saved_root = tempfile.tempdir
        try:
            tempfile.tempdir = os.path.join(saved_root, self.id())
            os.mkdir(tempfile.tempdir)
            self.__logTestInfo(self.id(), 'BEGIN')
            return super(WatchmanTestCase, self).run(result)
        finally:
            tempfile.tempdir = saved_root
            self.__logTestInfo(self.id(), 'END')

    def touch(self, fname, times=None):
        """Update the atime/mtime of fname, creating it if missing
        (equivalent to touch(1))."""
        try:
            os.utime(fname, times)
        except OSError as e:
            if e.errno == errno.ENOENT:
                with open(fname, 'a'):
                    os.utime(fname, times)
            else:
                raise

    def touchRelative(self, base, *fname):
        """touch() the file at base joined with the fname components."""
        self.touch(os.path.join(base, *fname), None)

    def __del__(self):
        # Best-effort cleanup of any watches the test established
        if hasattr(self, 'client'):
            try:
                self.watchmanCommand('watch-del-all')
            except Exception:
                pass

    def watchmanCommand(self, *args):
        """Issue a command via the shared client and return its response."""
        return self.getClient().query(*args)

    def waitFor(self, cond, timeout=10):
        """Continually invoke `cond` until it returns true or timeout
        is reached.  Returns a [bool, result] pair where the first
        element indicates success/failure and the second is the last
        return value from the condition."""
        deadline = time.time() + timeout
        res = None
        while time.time() < deadline:
            res = cond()
            if res:
                return [True, res]
            time.sleep(0.03)
        return [False, res]

    def assertWaitFor(self, cond, timeout=10, message=None):
        """Like waitFor(), but fail the test if the condition is not
        met within the timeout.  Returns the condition's value."""
        status, res = self.waitFor(cond, timeout)
        if status:
            return res
        if message is None:
            message = "%s was not met in %s seconds: %s" % (cond, timeout, res)
        self.fail(message)

    def getFileList(self, root, cursor=None, relativeRoot=None):
        """Query the sorted list of existing file names under root,
        optionally scoped by a since-cursor and/or a relative root."""
        expr = {
            "expression": ["exists"],
            "fields": ["name"],
        }
        if cursor:
            expr['since'] = cursor
        if relativeRoot:
            expr['relative_root'] = relativeRoot
        res = self.watchmanCommand('query', root, expr)
        files = sorted(res['files'])
        # Remember the most recent result for assertFileList's report
        self.last_file_list = files
        return files

    def normFileList(self, files):
        """Return the names normalized (os.path.normpath) and sorted."""
        return sorted(map(os.path.normpath, files))

    # Wait for the file list to match the input set
    def assertFileList(self, root, files=None, cursor=None,
                       relativeRoot=None, message=None):
        # files=None instead of a mutable default list
        expected_files = self.normFileList(files or [])
        st, res = self.waitFor(
            lambda: self.getFileList(root, cursor=cursor,
                                     relativeRoot=relativeRoot
                                     ) == expected_files)
        self.assertEqual(self.last_file_list, expected_files, message)
Empty file added tests/integration/__init__.py
Empty file.

0 comments on commit e7da42b

Please sign in to comment.