author     David Robillard <d@drobilla.net>    2019-03-15 23:57:07 +0100
committer  David Robillard <d@drobilla.net>    2019-03-15 23:57:07 +0100
commit     d23dbcc92592fb68f3294889d2b8fac9511094ca (patch)
tree       ea978ac59e2e0841edec02b192507089602db2d1
parent     d633e0b564bd63a7489b1d5992e3e6d10b64f7a1 (diff)
WIP: Rewrite test framework (test-performance)
-rw-r--r--  waflib/extras/autowaf.py | 547
-rw-r--r--  wscript                  | 274
2 files changed, 446 insertions(+), 375 deletions(-)
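
This commit replaces autowaf's pre_test()/run_test()/post_test() helpers with a TestContext build context and TestGroup scopes. A minimal sketch of how a project's wscript drives the rewritten framework, distilled from the diff below; it is illustrative only (the './serd_test' and './serdi_static' commands come from this repository, the 'Errors' group and its arguments are made up):

    # Sketch, not part of the patch: usage of the TestContext defined below ("waf test").
    from waflib.extras import autowaf

    def test(ctx):
        # A TestGroup tracks pass/fail counts and timing for its scope
        with ctx.test_group('Unit') as tests:
            tests.run(['./serd_test'])                     # expects exit status 0

        # Group-wide keyword defaults apply to every run() inside the group
        with ctx.test_group('BadCommands', expected=1) as tests:
            tests.run(['./serdi_static', '-i', 'illegal'])

        # stderr=autowaf.NONEMPTY adds a second check that an error was printed
        with ctx.test_group('Errors', expected=1) as tests:
            tests.run(['./serdi_static', '/no/such/file'],
                      stderr=autowaf.NONEMPTY, name='missing input')
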
diff --git a/waflib/extras/autowaf.py b/waflib/extras/autowaf.py
index 92d0e57e..5739b58b 100644
--- a/waflib/extras/autowaf.py
+++ b/waflib/extras/autowaf.py
@@ -4,7 +4,7 @@ import subprocess
import sys
import time
-from waflib import Build, Context, Logs, Options, Utils
+from waflib import Configure, ConfigSet, Build, Context, Logs, Options, Utils
from waflib.TaskGen import feature, before, after
global g_is_child
@@ -17,16 +17,19 @@ g_step = 0
global line_just
line_just = 40
+NONEMPTY = -10
+
+if sys.platform == 'win32':
+ lib_path_name = 'PATH'
+elif sys.platform == 'darwin':
+ lib_path_name = 'DYLD_LIBRARY_PATH'
+else:
+ lib_path_name = 'LD_LIBRARY_PATH'
+
# Compute dependencies globally
# import preproc
# preproc.go_absolute = True
-# Test context that inherits build context to make configuration available
-class TestContext(Build.BuildContext):
- "Run tests"
- cmd = 'test'
- fun = 'test'
-
@feature('c', 'cxx')
@after('apply_incpaths')
def include_config_h(self):
@@ -95,6 +98,21 @@ def add_flags(opt, flags):
opt.add_option('--' + name, action='store_true',
dest=name.replace('-', '_'), help=desc)
+class ConfigureContext(Configure.ConfigurationContext):
+ """configures the project"""
+
+ def __init__(self, **kwargs):
+ super(ConfigureContext, self).__init__(**kwargs)
+ self.run_env = ConfigSet.ConfigSet()
+
+ def store(self):
+ self.env.AUTOWAF_RUN_ENV = self.run_env.get_merged_dict()
+ super(ConfigureContext, self).store()
+
+ def build_path(self, path):
+ """Return `path` within the build directory"""
+ return str(self.path.get_bld().find_node(path))
+
def get_check_func(conf, lang):
if lang == 'c':
return conf.check_cc
@@ -463,13 +481,15 @@ def set_lib_env(conf, name, version):
major_ver = version.split('.')[0]
pkg_var_name = 'PKG_' + name.replace('-', '_') + '_' + major_ver
lib_name = '%s-%s' % (name, major_ver)
+ lib_path = [str(conf.path.get_bld())]
if conf.env.PARDEBUG:
lib_name += 'D'
conf.env[pkg_var_name] = lib_name
conf.env['INCLUDES_' + NAME] = ['${INCLUDEDIR}/%s-%s' % (name, major_ver)]
- conf.env['LIBPATH_' + NAME] = [conf.env.LIBDIR]
+ conf.env['LIBPATH_' + NAME] = lib_path
conf.env['LIB_' + NAME] = [lib_name]
+ conf.run_env.append_unique(lib_path_name, lib_path)
conf.define(NAME + '_VERSION', version)
def set_line_just(conf, width):
@@ -744,18 +764,57 @@ def build_i18n(bld, srcdir, dir, name, sources, copyright_holder=None):
build_i18n_po(bld, srcdir, dir, name, sources, copyright_holder)
build_i18n_mo(bld, srcdir, dir, name, sources, copyright_holder)
-def cd_to_build_dir(ctx, appname):
- top_level = (len(ctx.stack_path) > 1)
- if top_level:
- os.chdir(os.path.join('build', appname))
- else:
- os.chdir('build')
+class ExecutionEnvironment:
+ """Context that sets system environment variables for program execution"""
+ def __init__(self, changes):
+ self.original_environ = os.environ.copy()
-def cd_to_orig_dir(ctx, child):
- if child:
- os.chdir(os.path.join('..', '..'))
- else:
- os.chdir('..')
+ self.diff = {}
+ for path_name, paths in changes.items():
+ value = os.pathsep.join(paths)
+ if path_name in os.environ:
+ value += os.pathsep + os.environ[path_name]
+
+ self.diff[path_name] = value
+
+ os.environ.update(self.diff)
+
+ def __str__(self):
+        return '\n'.join('%s="%s"' % (k, v) for k, v in self.diff.items())
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ os.environ = self.original_environ
+
+def show_diff(from_lines, to_lines, from_filename, to_filename):
+ import difflib
+ import sys
+
+ for line in difflib.unified_diff(
+ from_lines, to_lines,
+ fromfile=os.path.abspath(from_filename),
+ tofile=os.path.abspath(to_filename)):
+ sys.stderr.write(line)
+
+def test_file_equals(patha, pathb):
+ import filecmp
+ import io
+
+ for path in (patha, pathb):
+ if not os.access(path, os.F_OK):
+ Logs.pprint('RED', 'error: missing file %s' % path)
+ return False
+
+ if filecmp.cmp(patha, pathb, shallow=False):
+ return True
+
+ with io.open(patha, 'rU', encoding='utf-8') as fa:
+ with io.open(pathb, 'rU', encoding='utf-8') as fb:
+ show_diff(fa.readlines(), fb.readlines(), patha, pathb)
+
+ return False
def bench_time():
if hasattr(time, 'perf_counter'): # Added in Python 3.3
@@ -763,207 +822,275 @@ def bench_time():
else:
return time.time()
-def pre_test(ctx, appname, dirs=['src']):
- Logs.pprint('GREEN', '\n[==========] Running %s tests' % appname)
-
- if not hasattr(ctx, 'autowaf_tests_total'):
- ctx.autowaf_tests_start_time = bench_time()
- ctx.autowaf_tests_total = 0
- ctx.autowaf_tests_failed = 0
- ctx.autowaf_local_tests_total = 0
- ctx.autowaf_local_tests_failed = 0
- ctx.autowaf_tests = {}
-
- ctx.autowaf_tests[appname] = {'total': 0, 'failed': 0}
-
- cd_to_build_dir(ctx, appname)
- if not ctx.env.NO_COVERAGE:
- diropts = ''
- for i in dirs:
- diropts += ' -d ' + i
- clear_log = open('lcov-clear.log', 'w')
+class TestScope:
+ """Scope that maintains pass/fail statistics for a test group"""
+ def __init__(self, name):
+ self.name = name
+ self.n_failed = 0
+ self.n_total = 0
+
+class TestContext(Build.BuildContext):
+ "runs test suite"
+ fun = cmd = 'test'
+
+ def __init__(self, **kwargs):
+ super(TestContext, self).__init__(**kwargs)
+ self.defaults = {'verbosity': 2 if Options.options.verbose else 0}
+ self.n_tests_failed = 0
+ self.n_tests_total = 0
+ self.start_time = bench_time()
+ self.stack = [TestScope(Context.g_module.APPNAME)]
+
+ def finalize(self):
+ if self.n_tests_failed > 0:
+ sys.exit(1)
+
+ super(TestContext, self).finalize()
+
+ def log_header(self, fmt, *args):
+ Logs.info('')
+ self.log_good('=' * 10, fmt % args)
+
+ def log_footer(self, fmt, *args):
+ if self.defaults['verbosity'] > 0:
+ Logs.info('')
+ self.log_good('=' * 10, fmt % args)
+
+ def log_good(self, title, fmt, *args):
+ Logs.pprint('GREEN', '[%s] %s' % (title.center(10), fmt % args))
+
+ def log_bad(self, title, fmt, *args):
+ Logs.pprint('RED', '[%s] %s' % (title.center(10), fmt % args))
+
+ def pre_recurse(self, node):
+ import importlib
+ wscript_module = Context.load_module(node.abspath())
+ group_name = wscript_module.APPNAME
+ self.stack.append(TestScope(group_name))
+
+ bld_dir = str(node.get_bld().parent)
+ Logs.info("Waf: Entering directory `%s'", bld_dir)
+ os.chdir(bld_dir)
+
+ self.log_header('Running %s tests', group_name)
+ if str(node.parent) == Context.top_dir:
+ self.clear_coverage()
+ super(TestContext, self).pre_recurse(node)
+
+ def test_result(self, success):
+ self.stack[-1].n_total += 1
+ self.stack[-1].n_failed += 1 if not success else 0
+
+ def pop(self):
+ scope = self.stack.pop()
+ self.stack[-1].n_total += scope.n_total
+ self.stack[-1].n_failed += scope.n_failed
+ return scope
+
+ def post_recurse(self, node):
+ super(TestContext, self).post_recurse(node)
+
+ scope = self.pop()
+ duration = (bench_time() - self.start_time) * 1000.0
+ self.log_footer('%d tests from %s ran (%d ms total)',
+ scope.n_total, scope.name, duration)
+
+ if not self.env.NO_COVERAGE:
+ if str(node.parent) == Context.top_dir:
+ self.gen_coverage()
+
+ if os.path.exists('coverage/index.html'):
+ self.log_good('COVERAGE', '<file://%s>',
+ os.path.abspath('coverage/index.html'))
+
+ successes = scope.n_total - scope.n_failed
+ Logs.pprint('GREEN', '[ PASSED ] %d tests' % successes)
+ if scope.n_failed > 0:
+ Logs.pprint('RED', '[ FAILED ] %d tests' % scope.n_failed)
+
+ Logs.info("\nWaf: Leaving directory `%s'", os.getcwd())
+ os.chdir(str(self.path))
+
+ def execute(self):
+ self.restore()
+ if not self.all_envs:
+ self.load_envs()
+
+ if not self.env.BUILD_TESTS:
+ self.fatal('Configuration does not include tests')
+
+ with ExecutionEnvironment(self.env.AUTOWAF_RUN_ENV) as env:
+ if self.defaults['verbosity'] > 0:
+ print(env)
+ self.recurse([self.run_dir])
+
+ def src_path(self, path):
+ return os.path.relpath(os.path.join(str(self.path), path))
+
+ def args(self, **kwargs):
+ all_kwargs = self.defaults.copy()
+ all_kwargs.update(kwargs)
+ return all_kwargs
+
+ def test_group(self, name, **kwargs):
+ return TestGroup(self, self.stack[-1].name, name, **self.args(**kwargs))
+
+ def set_test_defaults(self, **kwargs):
+ """Set default arguments to be passed to all tests"""
+ self.defaults.update(kwargs)
+
+ def clear_coverage(self):
+ """Zero old coverage data"""
try:
- try:
- # Clear coverage data
- subprocess.call(('lcov %s -z' % diropts).split(),
- stdout=clear_log, stderr=clear_log)
- except Exception:
- Logs.warn('Failed to run lcov, no coverage report generated')
- finally:
- clear_log.close()
-
-class TestFailed(Exception):
- pass
-
-def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
- if not ctx.env.NO_COVERAGE:
- diropts = ''
- for i in dirs:
- diropts += ' -d ' + i
- coverage_log = open('lcov-coverage.log', 'w')
- coverage_lcov = open('coverage.lcov', 'w')
- coverage_stripped_lcov = open('coverage-stripped.lcov', 'w')
+ with open('cov-clear.log', 'w') as log:
+ subprocess.call(['lcov', '-z', '-d', str(self.path)],
+ stdout=log, stderr=log)
+
+ except Exception:
+ Logs.warn('Failed to run lcov to clear old coverage data')
+
+ def gen_coverage(self):
+ """Generate coverage data and report"""
try:
- try:
- base = '.'
- if g_is_child:
- base = '..'
-
- # Generate coverage data
- lcov_cmd = 'lcov -c %s -b %s' % (diropts, base)
- if ctx.env.LLVM_COV:
- lcov_cmd += ' --gcov-tool %s' % ctx.env.LLVM_COV[0]
- subprocess.call(lcov_cmd.split(),
- stdout=coverage_lcov, stderr=coverage_log)
-
- # Strip unwanted stuff
- subprocess.call(
- ['lcov', '--remove', 'coverage.lcov'] + remove,
- stdout=coverage_stripped_lcov, stderr=coverage_log)
-
- # Generate HTML coverage output
- if not os.path.isdir('coverage'):
- os.makedirs('coverage')
- subprocess.call(
- 'genhtml -o coverage coverage-stripped.lcov'.split(),
- stdout=coverage_log, stderr=coverage_log)
-
- except Exception:
- Logs.warn('Failed to run lcov, no coverage report generated')
- finally:
- coverage_stripped_lcov.close()
- coverage_lcov.close()
- coverage_log.close()
-
- duration = (bench_time() - ctx.autowaf_tests_start_time) * 1000.0
- total_tests = ctx.autowaf_tests[appname]['total']
- failed_tests = ctx.autowaf_tests[appname]['failed']
- passed_tests = total_tests - failed_tests
- Logs.pprint('GREEN', '\n[==========] %d tests from %s ran (%d ms total)' % (
- total_tests, appname, duration))
- if not ctx.env.NO_COVERAGE:
- Logs.pprint('GREEN', '[----------] Coverage: <file://%s>'
- % os.path.abspath('coverage/index.html'))
-
- Logs.pprint('GREEN', '[ PASSED ] %d tests' % passed_tests)
- if failed_tests > 0:
- Logs.pprint('RED', '[ FAILED ] %d tests' % failed_tests)
- raise TestFailed('Tests from %s failed' % appname)
- Logs.pprint('', '')
-
- top_level = (len(ctx.stack_path) > 1)
- if top_level:
- cd_to_orig_dir(ctx, top_level)
+ with open('cov.lcov', 'w') as out:
+ with open('cov.log', 'w') as err:
+ subprocess.call(['lcov', '-c', '--no-external',
+ '--rc', 'lcov_branch_coverage=1',
+ '-b', '.',
+ '-d', str(self.path)],
+ stdout=out, stderr=err)
+
+ if not os.path.isdir('coverage'):
+ os.makedirs('coverage')
+
+ with open('genhtml.log', 'w') as log:
+ subprocess.call(['genhtml',
+ '-o', 'coverage',
+ '--rc', 'genhtml_branch_coverage=1',
+ 'cov.lcov'],
+ stdout=log, stderr=log)
+
+ except Exception:
+ Logs.warn('Failed to run lcov to generate coverage report')
def run_test(ctx,
appname,
test,
- desired_status=0,
- dirs=['src'],
+ expected=0,
name='',
- header=False,
- quiet=False):
- """Run an individual test.
-
- `test` is either a shell command string, or a list of [name, return status]
- for displaying tests implemented in the calling Python code.
- """
-
- ctx.autowaf_tests_total += 1
- ctx.autowaf_local_tests_total += 1
- ctx.autowaf_tests[appname]['total'] += 1
-
- out = (None, None)
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ verbosity=1):
+ class TestOutput(object):
+ """Test output that is truthy if result is as expected"""
+ def __init__(self, expected_result):
+ self.expected_result = expected_result
+            self.result = self.stdout = self.stderr = None
+
+ def __bool__(self):
+ return (self.expected_result is None or
+ self.result == self.expected_result)
+
+ __nonzero__ = __bool__
+
+ def stream(s):
+ return open(s, 'wb') if type(s) == str else s
+
+ def is_string(s):
+ if sys.version_info[0] < 3:
+ return isinstance(s, basestring)
+ return isinstance(s, str)
+
+ output = TestOutput(expected)
if type(test) == list:
- name = test[0]
- returncode = test[1]
- elif callable(test):
- returncode = test()
- else:
- s = test
- if isinstance(test, type([])):
- s = ' '.join(test)
- if header and not quiet:
- Logs.pprint('Green', '\n[ RUN ] %s' % s)
- cmd = test
- if Options.options.test_wrapper:
- cmd = Options.options.test_wrapper + ' ' + test
- if name == '':
- name = test
-
- proc = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out = proc.communicate()
- returncode = proc.returncode
-
- success = desired_status is None or returncode == desired_status
- if success:
- if not quiet:
- Logs.pprint('GREEN', '[ OK ] %s' % name)
- else:
- Logs.pprint('RED', '[ FAILED ] %s' % name)
- ctx.autowaf_tests_failed += 1
- ctx.autowaf_local_tests_failed += 1
- ctx.autowaf_tests[appname]['failed'] += 1
- if type(test) != list and not callable(test):
- Logs.pprint('RED', test)
-
- if Options.options.verbose and type(test) != list and not callable(test):
- sys.stdout.write(out[0].decode('utf-8'))
- sys.stderr.write(out[1].decode('utf-8'))
-
- return (success, out)
-
-def tests_name(ctx, appname, name='*'):
- if name == '*':
- return appname
- else:
- return '%s.%s' % (appname, name)
-
-def begin_tests(ctx, appname, name='*'):
- ctx.autowaf_local_tests_failed = 0
- ctx.autowaf_local_tests_total = 0
- ctx.autowaf_local_tests_start_time = bench_time()
- Logs.pprint('GREEN', '\n[----------] %s' % (
- tests_name(ctx, appname, name)))
-
- class Handle:
- def __enter__(self):
- pass
-
- def __exit__(self, type, value, traceback):
- end_tests(ctx, appname, name)
+ import pipes
+ name = name if name else ' '.join(map(pipes.quote, test))
+ if verbosity > 1:
+ ctx.log_good('RUN ', name)
- return Handle()
-
-def end_tests(ctx, appname, name='*'):
- duration = (bench_time() - ctx.autowaf_local_tests_start_time) * 1000.0
- total = ctx.autowaf_local_tests_total
- failures = ctx.autowaf_local_tests_failed
- if failures == 0:
- Logs.pprint('GREEN', '[----------] %d tests from %s (%d ms total)' % (
- ctx.autowaf_local_tests_total, tests_name(ctx, appname, name), duration))
+ if Options.options.test_wrapper:
+ test = [Options.options.test_wrapper] + test
+
+ out_stream = open(stdout, 'wb') if is_string(stdout) else None
+ err_stream = open(stderr, 'wb') if is_string(stderr) else None
+ stdout = out_stream if out_stream else stdout
+ stderr = err_stream if err_stream else stderr
+
+ with open(os.devnull, 'wb') as null:
+ out = null if verbosity < 3 and not stdout else stdout
+ err = null if verbosity < 2 and not stderr else stderr
+ proc = subprocess.Popen(test, stdin=stdin, stdout=out, stderr=err)
+ output.stdout, output.stderr = proc.communicate()
+ output.result = proc.returncode
+
+ out_stream = out_stream.close() if out_stream else None
+ err_stream = err_stream.close() if err_stream else None
+ elif callable(test):
+ output.result = test()
else:
- Logs.pprint('RED', '[----------] %d/%d tests from %s (%d ms total)' % (
- total - failures, total, tests_name(ctx, appname, name), duration))
-
-def run_tests(ctx,
- appname,
- tests,
- desired_status=0,
- dirs=['src'],
- name='*',
- headers=False):
- begin_tests(ctx, appname, name)
-
- diropts = ''
- for i in dirs:
- diropts += ' -d ' + i
-
- for i in tests:
- run_test(ctx, appname, i, desired_status, dirs, i, headers)
-
- end_tests(ctx, appname, name)
+ ctx.log_bad('ERROR', name)
+ return output
+
+ if output and verbosity > 0:
+ ctx.log_good(' OK', name)
+ elif not output:
+ ctx.log_bad('FAILED', name)
+
+ return output
+
+class TestGroup:
+ def __init__(self, tst, suitename, name, **kwargs):
+ self.tst = tst
+ self.suitename = suitename
+ self.name = name
+ self.kwargs = kwargs
+ self.start_time = bench_time()
+ tst.stack.append(TestScope(name))
+
+ def label(self):
+ return self.suitename + '.%s' % self.name if self.name else ''
+
+ def args(self, **kwargs):
+ all_kwargs = self.tst.args(**self.kwargs)
+ all_kwargs.update(kwargs)
+ return all_kwargs
+
+ def run(self, test, **kwargs):
+ all_kwargs = self.args(**kwargs)
+
+ if 'stderr' in all_kwargs and all_kwargs['stderr'] == NONEMPTY:
+ import tempfile
+ with tempfile.TemporaryFile(mode='w') as stderr:
+ all_kwargs['stderr'] = stderr
+ output = run_test(self.tst, self.suitename, test, **all_kwargs)
+ if output:
+ output = self.run(lambda: stderr.tell() > 0,
+ expected=True,
+ verbosity=0,
+ name=kwargs['name'] + ' error message')
+ else:
+ output = run_test(self.tst, self.suitename, test, **all_kwargs)
+
+ self.tst.test_result(output)
+ return output
+
+ def __enter__(self):
+ if 'verbosity' in self.kwargs and self.kwargs['verbosity'] > 0:
+ Logs.info('')
+ self.tst.log_good('-' * 10, self.label())
+ return self
+
+ def __exit__(self, type, value, traceback):
+ duration = (bench_time() - self.start_time) * 1000.0
+ scope = self.tst.pop()
+ n_passed = scope.n_total - scope.n_failed
+ if scope.n_failed == 0:
+ self.tst.log_good('-' * 10, '%d tests from %s (%d ms total)',
+ scope.n_total, self.label(), duration)
+ else:
+ self.tst.log_bad('-' * 10, '%d/%d tests from %s (%d ms total)',
+ n_passed, scope.n_total, self.label(), duration)
def run_ldconfig(ctx):
should_run = (ctx.cmd == 'install' and
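
The ExecutionEnvironment context manager added above is what lets tests find the just-built shared library: it prepends the directories recorded in AUTOWAF_RUN_ENV to the platform's search-path variable (PATH on Windows, DYLD_LIBRARY_PATH on macOS, LD_LIBRARY_PATH elsewhere), then restores os.environ on exit. A small sketch of that behaviour, with a hypothetical build path:

    # Sketch, not part of the patch; assumes waflib is importable.
    import os
    from waflib.extras.autowaf import ExecutionEnvironment

    changes = {'LD_LIBRARY_PATH': ['/tmp/serd/build']}   # hypothetical path
    with ExecutionEnvironment(changes) as env:
        print(env)   # e.g. LD_LIBRARY_PATH="/tmp/serd/build:<previous value>"
        # subprocesses started here inherit the extended search path
    # after the block, os.environ is back to its original contents
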
diff --git a/wscript b/wscript
index 425ca787..2c084733 100644
--- a/wscript
+++ b/wscript
@@ -3,6 +3,7 @@
import glob
import io
import os
+
from waflib import Logs, Options
from waflib.extras import autowaf
@@ -216,18 +217,6 @@ def upload_docs(ctx):
os.system('soelim %s | pre-grohtml troff -man -wall -Thtml | post-grohtml > build/%s.html' % (page, page))
os.system('rsync -avz --delete -e ssh build/%s.html drobilla@drobilla.net:~/drobilla.net/man/' % page)
-def file_equals(patha, pathb):
- import filecmp
-
- if filecmp.cmp(patha, pathb, shallow=False):
- return True
-
- with io.open(patha, 'rU', encoding='utf-8') as fa:
- with io.open(pathb, 'rU', encoding='utf-8') as fb:
- show_diff(fa.readlines(), fb.readlines(), patha, pathb)
-
- return False
-
def earl_assertion(test, passed, asserter):
import datetime
@@ -235,10 +224,6 @@ def earl_assertion(test, passed, asserter):
if asserter is not None:
asserter_str = '\n\tearl:assertedBy <%s> ;' % asserter
- passed_str = 'earl:failed'
- if passed:
- passed_str = 'earl:passed'
-
return '''
[]
a earl:Assertion ;%s
@@ -251,49 +236,33 @@ def earl_assertion(test, passed, asserter):
] .
''' % (asserter_str,
test,
- passed_str,
+ 'earl:passed' if passed else 'earl:failed',
datetime.datetime.now().replace(microsecond=0).isoformat())
-def build_path(ctx, path):
- return os.path.relpath(path, os.getcwd())
-
-def show_diff(from_lines, to_lines, from_filename, to_filename):
- import difflib
- import sys
-
- for line in difflib.unified_diff(
- from_lines, to_lines,
- fromfile=os.path.abspath(from_filename),
- tofile=os.path.abspath(to_filename)):
- sys.stderr.write(line)
-
-def check_output(out_filename, check_filename):
- if not os.access(out_filename, os.F_OK):
- Logs.pprint('RED', 'error: missing output file %s' % out_filename)
- return False
-
- return file_equals(check_filename, out_filename)
-
-def test_thru(ctx, base, path, check_filename, flags, isyntax, osyntax,
- options='', quiet=False):
- in_filename = build_path(ctx, os.path.join(ctx.path.abspath(), path))
- out_filename = build_path(ctx, path + '.thru')
-
- command = ('serdi_static %s %s -i %s -o %s -p foo "%s" "%s" | '
- 'serdi_static %s -i %s -o %s -c foo - "%s" > %s') % (
- options, flags.ljust(5),
- isyntax, isyntax, in_filename, base,
- options, isyntax, osyntax, base, out_filename)
-
- if autowaf.run_test(ctx, APPNAME, command, 0, name=out_filename, quiet=quiet):
- autowaf.run_test(
- ctx, APPNAME,
- lambda: check_output(out_filename, check_filename),
- True,
- name=out_filename,
- quiet=quiet)
- else:
- Logs.pprint('RED', 'FAIL: error running %s' % command)
+serdi = './serdi_static'
+
+def test_thru(group, base, path, check_path, flags, isyntax, osyntax, opts=[]):
+ out_path = path + '.out'
+ out_cmd = [serdi] + opts + [f for sublist in flags for f in sublist] + [
+ '-i', isyntax,
+ '-o', isyntax,
+ '-p', 'foo',
+ group.tst.src_path(path), base]
+
+ thru_path = path + '.thru'
+ thru_cmd = [serdi] + opts + [
+ '-i', isyntax,
+ '-o', osyntax,
+ '-c', 'foo',
+ out_path,
+ base]
+
+ return (group.run(out_cmd, stdout=out_path, verbosity=0, name=out_path) and
+ group.run(thru_cmd, stdout=thru_path, verbosity=0, name=thru_path) and
+ group.run(lambda: autowaf.test_file_equals(thru_path, check_path),
+ expected=True,
+ verbosity=0,
+ name='%s == %s' % (thru_path, check_path)))
def file_uri_to_path(uri):
try:
@@ -305,7 +274,7 @@ def file_uri_to_path(uri):
drive = os.path.splitdrive(path[1:])[0]
return path if not drive else path[1:]
-def test_suite(ctx, base_uri, testdir, report, isyntax, osyntax, options=''):
+def test_suite(ctx, base_uri, testdir, report, isyntax, osyntax, options=[]):
import itertools
srcdir = ctx.path.abspath()
@@ -315,7 +284,7 @@ def test_suite(ctx, base_uri, testdir, report, isyntax, osyntax, options=''):
import subprocess
import re
model = {}
- proc = subprocess.Popen(['./serdi_static', filename], stdout=subprocess.PIPE)
+ proc = subprocess.Popen([serdi, filename], stdout=subprocess.PIPE)
for line in proc.communicate()[0].splitlines():
matches = re.match('<([^ ]*)> <([^ ]*)> <([^ ]*)> \.', line.decode('utf-8'))
if matches:
@@ -333,14 +302,7 @@ def test_suite(ctx, base_uri, testdir, report, isyntax, osyntax, options=''):
if os.getenv('USER') == 'drobilla':
asserter = 'http://drobilla.net/drobilla#me'
- def run_test(command, expected_return, name, quiet=False):
- header = Options.options.verbose
- result = autowaf.run_test(ctx, APPNAME, command, expected_return, name=name, header=header, quiet=quiet)
- if expected_return is not None and expected_return != 0:
- autowaf.run_test(ctx, APPNAME,
- lambda: bool(result[1][1]),
- True, name=name + ' prints error message', quiet=True)
- return result
+ verbosity = 2 if Options.options.verbose else 0
def run_tests(test_class, expected_return):
tests = []
@@ -350,68 +312,66 @@ def test_suite(ctx, base_uri, testdir, report, isyntax, osyntax, options=''):
if len(tests) == 0:
return
- thru_flags = ['-e', '-f', '-b', '-r http://example.org/']
+ thru_flags = [['-e'], ['-f'], ['-b'], ['-r', 'http://example.org/']]
thru_options = []
for n in range(len(thru_flags) + 1):
thru_options += list(itertools.combinations(thru_flags, n))
thru_options_iter = itertools.cycle(thru_options)
- quiet = not Options.options.verbose
tests_name = '%s.%s' % (testdir, test_class[test_class.find('#') + 1:])
- with autowaf.begin_tests(ctx, APPNAME, tests_name):
+ with ctx.test_group(tests_name) as group:
for (num, test) in enumerate(sorted(tests)):
action_node = model[test][mf + 'action'][0]
action = os.path.join('tests', testdir, os.path.basename(action_node))
rel_action = os.path.join(os.path.relpath(srcdir), action)
- abs_action = os.path.join(srcdir, action)
uri = base_uri + os.path.basename(action)
- command = 'serdi_static %s -f %s "%s" > %s' % (
- options, rel_action, uri, action + '.out')
+ command = [serdi] + options + ['-f', rel_action, uri]
# Run strict test
- result = run_test(command, expected_return, action, quiet=quiet)
- if result[0] and ((mf + 'result') in model[test]):
+ if expected_return == 0:
+ result = group.run(command, stdout=action + '.out', name=action)
+ else:
+ result = group.run(command,
+ stdout=action + '.out',
+ stderr=autowaf.NONEMPTY,
+ expected=expected_return,
+ name=action)
+
+ if result and ((mf + 'result') in model[test]):
# Check output against test suite
check_uri = model[test][mf + 'result'][0]
- check_path = build_path(ctx, file_uri_to_path(check_uri))
- result = autowaf.run_test(
- ctx, APPNAME,
- lambda: check_output(action + '.out', check_path),
- True, name=action, quiet=True)
+ check_path = ctx.src_path(file_uri_to_path(check_uri))
+ result = group.run(
+ lambda: autowaf.test_file_equals(action + '.out', check_path),
+ expected=True, name=action)
# Run round-trip tests
- if result[0]:
- test_thru(ctx, uri, action, check_path,
- ' '.join(next(thru_options_iter)),
- isyntax, osyntax, options, quiet=True)
+ if result:
+ test_thru(group, uri, action, check_path,
+ list(next(thru_options_iter)),
+ isyntax, osyntax, options)
# Write test report entry
if report is not None:
- report.write(earl_assertion(test, result[0], asserter))
+ report.write(earl_assertion(test, result, asserter))
# Run lax test
- run_test(command.replace('serdi_static', 'serdi_static -l'),
- None, action + ' lax', True)
-
- def test_types():
- types = []
- for lang in ['Turtle', 'NTriples', 'Trig', 'NQuads']:
- types += [['http://www.w3.org/ns/rdftest#Test%sPositiveSyntax' % lang, 0],
- ['http://www.w3.org/ns/rdftest#Test%sNegativeSyntax' % lang, 1],
- ['http://www.w3.org/ns/rdftest#Test%sNegativeEval' % lang, 1],
- ['http://www.w3.org/ns/rdftest#Test%sEval' % lang, 0]]
- return types
-
- for i in test_types():
+ group.run([command[0]] + ['-l'] + command[1:],
+ expected=None, name=action + ' lax')
+
+ for i in (('http://www.w3.org/ns/rdftest#Test%sPositiveSyntax' % isyntax, 0),
+ ('http://www.w3.org/ns/rdftest#Test%sEval' % isyntax, 0),
+ ('http://www.w3.org/ns/rdftest#Test%sNegativeSyntax' % isyntax, 1),
+ ('http://www.w3.org/ns/rdftest#Test%sNegativeEval' % isyntax, 1)):
run_tests(i[0], i[1])
def test(ctx):
- "runs test suite"
-
+ import tempfile
+
# Create test output directories
for i in ['bad', 'good', 'TurtleTests', 'NTriplesTests', 'NQuadsTests', 'TriGTests']:
try:
- test_dir = os.path.join(autowaf.build_dir(APPNAME, 'tests'), i)
+ test_dir = os.path.join('tests', i)
os.makedirs(test_dir)
for i in glob.glob(test_dir + '/*.*'):
os.remove(i)
@@ -419,70 +379,57 @@ def test(ctx):
pass
srcdir = ctx.path.abspath()
- os.environ['PATH'] = '.' + os.pathsep + os.getenv('PATH')
- autowaf.pre_test(ctx, APPNAME)
- autowaf.run_tests(ctx, APPNAME, ['serd_test'], name='Unit')
+ with ctx.test_group('Unit') as tests:
+ tests.run(['./serd_test'])
- def test_syntax_io(in_name, expected_name, lang):
+ def test_syntax_io(test, in_name, check_name, lang):
in_path = 'tests/good/%s' % in_name
- autowaf.run_test(
- ctx, APPNAME,
- 'serdi_static -o %s "%s/%s" "%s" > %s.out' % (
- lang, srcdir, in_path, in_path, in_path),
- 0, name=in_name)
-
- autowaf.run_test(
- ctx, APPNAME,
- lambda: file_equals('%s/tests/good/%s' % (srcdir, expected_name),
- '%s.out' % in_path),
- True, quiet=True, name=in_name + '-check')
-
- with autowaf.begin_tests(ctx, APPNAME, 'ThroughSyntax'):
- test_syntax_io('base.ttl', 'base.ttl', 'turtle')
- test_syntax_io('qualify-in.ttl', 'qualify-out.ttl', 'turtle')
-
- nul = os.devnull
- autowaf.run_tests(ctx, APPNAME, [
- 'serdi_static %s/tests/good/manifest.ttl > %s' % (srcdir, nul),
- 'serdi_static -v > %s' % nul,
- 'serdi_static -h > %s' % nul,
- 'serdi_static -s "<foo> a <#Thingie> ." > %s' % nul,
- 'serdi_static %s > %s' % (nul, nul)
- ], 0, name='GoodCommands')
-
- autowaf.run_tests(ctx, APPNAME, [
- 'serdi_static -q %s/tests/bad/bad-id-clash.ttl > %s' % (srcdir, nul),
- 'serdi_static > %s' % nul,
- 'serdi_static ftp://example.org/unsupported.ttl > %s' % nul,
- 'serdi_static -i > %s' % nul,
- 'serdi_static -o > %s' % nul,
- 'serdi_static -z > %s' % nul,
- 'serdi_static -p > %s' % nul,
- 'serdi_static -c > %s' % nul,
- 'serdi_static -r > %s' % nul,
- 'serdi_static -i illegal > %s' % nul,
- 'serdi_static -o illegal > %s' % nul,
- 'serdi_static -i turtle > %s' % nul,
- 'serdi_static /no/such/file > %s' % nul],
- 1, name='BadCommands')
-
- with autowaf.begin_tests(ctx, APPNAME, 'IoErrors'):
- # Test read error by reading a directory
- autowaf.run_test(ctx, APPNAME, 'serdi_static -e "file://%s/"' % srcdir,
- 1, name='read_error')
-
- # Test read error with bulk input by reading a directory
- autowaf.run_test(ctx, APPNAME, 'serdi_static "file://%s/"' % srcdir,
- 1, name='read_error_bulk')
-
- # Test write error by writing to /dev/full
+ out_path = in_path + '.out'
+ check_path = '%s/tests/good/%s' % (srcdir, check_name)
+
+ test.run([serdi, '-o', lang, '%s/%s' % (srcdir, in_path), in_path],
+ stdout=out_path, name=in_name)
+
+ test.run(lambda: autowaf.test_file_equals(check_path, out_path),
+ expected=True, name='%s check' % in_name)
+
+ with ctx.test_group('ThroughSyntax') as tests:
+ test_syntax_io(tests, 'base.ttl', 'base.ttl', 'turtle')
+ test_syntax_io(tests, 'qualify-in.ttl', 'qualify-out.ttl', 'turtle')
+
+ with ctx.test_group('GoodCommands') as tests:
+ tests.run([serdi, '%s/tests/good/manifest.ttl' % srcdir])
+ tests.run([serdi, '-v'])
+ tests.run([serdi, '-h'])
+ tests.run([serdi, '-s', '<foo> a <#Thingie> .'])
+ tests.run([serdi, os.devnull])
+ with tempfile.TemporaryFile(mode='r') as stdin:
+ tests.run([serdi, '-'], stdin=stdin)
+
+ with ctx.test_group('BadCommands', expected=1) as tests:
+ tests.run([serdi])
+ tests.run([serdi, '/no/such/file'])
+ tests.run([serdi, 'ftp://example.org/unsupported.ttl'])
+ tests.run([serdi, '-c'])
+ tests.run([serdi, '-i', 'illegal'])
+ tests.run([serdi, '-i', 'turtle'])
+ tests.run([serdi, '-i'])
+ tests.run([serdi, '-o', 'illegal'])
+ tests.run([serdi, '-o'])
+ tests.run([serdi, '-p'])
+ tests.run([serdi, '-q', '%s/tests/bad/bad-id-clash.ttl' % srcdir])
+ tests.run([serdi, '-r'])
+ tests.run([serdi, '-z'])
+
+ with ctx.test_group('IoErrors', expected=1) as tests:
+ tests.run([serdi, '-e', 'file://%s/' % srcdir], name='Read directory')
+ tests.run([serdi, 'file://%s/' % srcdir], name='Bulk read directory')
if os.path.exists('/dev/full'):
- autowaf.run_test(ctx, APPNAME,
- 'serdi_static "file://%s/tests/good/manifest.ttl" > /dev/full' % srcdir,
- 1, name='write_error')
+ tests.run([serdi, 'file://%s/tests/good/manifest.ttl' % srcdir],
+ stdout='/dev/full', name='Write error')
- # Serd-specific test cases
+ # Serd-specific test suites
serd_base = 'http://drobilla.net/sw/serd/tests/'
test_suite(ctx, serd_base + 'good/', 'good', None, 'Turtle', 'NTriples')
test_suite(ctx, serd_base + 'bad/', 'bad', None, 'Turtle', 'NTriples')
@@ -493,8 +440,7 @@ def test(ctx):
'@prefix dc: <http://purl.org/dc/elements/1.1/> .\n')
with open(os.path.join(srcdir, 'serd.ttl')) as serd_ttl:
- for line in serd_ttl:
- report.write(line)
+ report.writelines(serd_ttl)
w3c_base = 'http://www.w3.org/2013/'
test_suite(ctx, w3c_base + 'TurtleTests/',
@@ -504,9 +450,7 @@ def test(ctx):
test_suite(ctx, w3c_base + 'NQuadsTests/',
'NQuadsTests', report, 'NQuads', 'NQuads')
test_suite(ctx, w3c_base + 'TriGTests/',
- 'TriGTests', report, 'TriG', 'NQuads', '-a')
-
- autowaf.post_test(ctx, APPNAME)
+ 'TriGTests', report, 'Trig', 'NQuads', ['-a'])
def posts(ctx):
path = str(ctx.path.abspath())
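
For reference, the autowaf.test_file_equals() helper added in this commit replaces the wscript-local file_equals()/check_output() pair removed above: it returns True when both files exist and their contents match, and otherwise prints an error or a unified diff to stderr and returns False, so the framework can wrap it in a lambda with expected=True. A sketch with hypothetical file names:

    # Sketch, not part of the patch; assumes waflib is importable.
    from waflib.extras import autowaf

    if autowaf.test_file_equals('tests/good/base.ttl.out', 'tests/good/base.ttl'):
        print('round-trip output matches')
    else:
        print('round-trip output differs (diff printed to stderr)')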