author    David Robillard <d@drobilla.net>  2019-03-17 17:31:04 +0100
committer David Robillard <d@drobilla.net>  2019-03-17 17:31:04 +0100
commit    1c6fb2b3543d1229eadaa2af007383fbcf82289d (patch)
tree      d2dcbaf61f3749f73dc7a5e10d3fc6cd5e6e129a /extras/autowaf.py
parent    9ced2d582681fa87b78ed67f186ed94f4bec1178 (diff)
Squashed 'waflib/' changes from 915dcb17..e7a29b6b
e7a29b6b Upgrade to waf 2.0.15
8280f9de Add command for running executables from the build directory
8073c1ad Make make_simple_dox() safe in case of exception
70d03b82 Avoid use of global counter hacks for configuration display
b7d689a4 Rewrite test framework
94deadf0 Automatically add options and move add_flags() to options context
f4259ee4 Reduce system include path noise
927b6082 Automatically display configuration header
c44b8f3b Set line justification from a constant in the wscript
a48e26fd Automatically detect if wscript has a test hook
ef66724d Save runtime variables in the environment
63bcbcd3 Clean up TestContext
b1d95050 Add ExecutionContext for setting runtime environment
387c1dfa Add show_diff() and test_file_equals() utilities
29d4d293 Fix in-tree library paths
9fde01f4 Add custom configuration context
6d3612fd Add lib_path_name constant

git-subtree-dir: waflib
git-subtree-split: e7a29b6b9b2f842314244c23c14d8f8f560904e1
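Among these changes, b7d689a4 rewrites the test framework around a TestContext whose wscript `test` hook receives the context directly, and a48e26fd makes autowaf detect that hook automatically. Below is a minimal sketch (not part of this commit) of a project wscript driving the new API shown in the diff; APPNAME and the test_foo commands are hypothetical, and a real wscript would also define options, configure, and build:

    # Hypothetical wscript sketch; the test_foo executable is an
    # assumption made purely for illustration.
    APPNAME = 'example'

    def test(tst):
        # Calling the context runs one test; a list is run as a command
        tst(['./test_foo'])                           # passes on exit status 0
        tst(['./test_foo', '--bad-arg'], expected=1)  # expect nonzero status

        # Groups keep their own pass/fail counts and timing
        with tst.group('io') as check:
            check(['./test_foo', '--write', 'out.txt'])
            check.file_equals('expected.txt', 'out.txt')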
Diffstat (limited to 'extras/autowaf.py')
-rw-r--r--  extras/autowaf.py  681
1 file changed, 428 insertions(+), 253 deletions(-)
diff --git a/extras/autowaf.py b/extras/autowaf.py
index 92d0e57e..51077d18 100644
--- a/extras/autowaf.py
+++ b/extras/autowaf.py
@@ -4,40 +4,46 @@ import subprocess
import sys
import time
-from waflib import Build, Context, Logs, Options, Utils
+from waflib import Configure, ConfigSet, Build, Context, Logs, Options, Utils
from waflib.TaskGen import feature, before, after
global g_is_child
g_is_child = False
-# Only run autowaf hooks once (even if sub projects call several times)
-global g_step
-g_step = 0
+NONEMPTY = -10
-global line_just
-line_just = 40
+if sys.platform == 'win32':
+ lib_path_name = 'PATH'
+elif sys.platform == 'darwin':
+ lib_path_name = 'DYLD_LIBRARY_PATH'
+else:
+ lib_path_name = 'LD_LIBRARY_PATH'
# Compute dependencies globally
# import preproc
# preproc.go_absolute = True
-# Test context that inherits build context to make configuration available
-class TestContext(Build.BuildContext):
- "Run tests"
- cmd = 'test'
- fun = 'test'
-
@feature('c', 'cxx')
@after('apply_incpaths')
def include_config_h(self):
self.env.append_value('INCPATHS', self.bld.bldnode.abspath())
-def set_options(opt, debug_by_default=False, test=False):
- "Add standard autowaf options if they havn't been added yet"
- global g_step
- if g_step > 0:
- return
+class OptionsContext(Options.OptionsContext):
+ def __init__(self, **kwargs):
+ super(OptionsContext, self).__init__(**kwargs)
+ set_options(self)
+ def configuration_options(self):
+ return self.get_option_group('Configuration options')
+
+ def add_flags(self, group, flags):
+ """Tersely add flags (a dictionary of longname:desc) to a group"""
+ for name, desc in flags.items():
+ group.add_option('--' + name, action='store_true',
+ dest=name.replace('-', '_'), help=desc)
+
+def set_options(opt, debug_by_default=False):
+ "Add standard autowaf options"
opts = opt.get_option_group('Configuration options')
# Standard directory options
@@ -77,7 +83,7 @@ def set_options(opt, debug_by_default=False, test=False):
help="build documentation (requires doxygen)")
# Test options
- if test:
+ if hasattr(Context.g_module, 'test'):
test_opts = opt.add_option_group('Test options', '')
opts.add_option('-T', '--test', action='store_true', dest='build_tests',
help='build unit tests')
@@ -88,12 +94,41 @@ def set_options(opt, debug_by_default=False, test=False):
dest='test_wrapper',
help='command prefix for tests (e.g. valgrind)')
- g_step = 1
+ # Run options
+ run_opts = opt.add_option_group('Run options')
+ run_opts.add_option('--cmd', type='string', dest='cmd',
+ help='command to run from build directory')
+
+class ConfigureContext(Configure.ConfigurationContext):
+ """configures the project"""
-def add_flags(opt, flags):
- for name, desc in flags.items():
- opt.add_option('--' + name, action='store_true',
- dest=name.replace('-', '_'), help=desc)
+ def __init__(self, **kwargs):
+ self.line_just = 45
+ if hasattr(Context.g_module, 'line_just'):
+ self.line_just = Context.g_module.line_just
+
+ super(ConfigureContext, self).__init__(**kwargs)
+ self.run_env = ConfigSet.ConfigSet()
+ self.system_include_paths = set()
+
+ def pre_recurse(self, node):
+ if len(self.stack_path) == 1:
+ Logs.pprint('BOLD', 'Configuring %s' % node.parent.srcpath())
+ super(ConfigureContext, self).pre_recurse(node)
+
+ def store(self):
+ self.env.AUTOWAF_RUN_ENV = self.run_env.get_merged_dict()
+ for path in sorted(self.system_include_paths):
+ if 'COMPILER_CC' in self.env:
+ self.env.append_value('CFLAGS', ['-isystem', path])
+ if 'COMPILER_CXX' in self.env:
+ self.env.append_value('CXXFLAGS', ['-isystem', path])
+
+ super(ConfigureContext, self).store()
+
+ def build_path(self, path='.'):
+ """Return `path` within the build directory"""
+ return str(self.path.get_bld().find_node(path))
def get_check_func(conf, lang):
if lang == 'c':
@@ -179,14 +214,8 @@ def check_pkg(conf, name, **args):
conf.env[var_name] = CheckType.OPTIONAL
if not conf.env.MSVC_COMPILER and 'system' in args and args['system']:
- includes = conf.env['INCLUDES_' + nameify(args['uselib_store'])]
- for path in includes:
- if 'COMPILER_CC' in conf.env:
- conf.env.append_value('CFLAGS', ['-isystem', path])
- if 'COMPILER_CXX' in conf.env:
- conf.env.append_value('CXXFLAGS', ['-isystem', path])
-
- conf.env.append_value('CXXFLAGS', ['-isystem', '/usr/local/include'])
+ conf.system_include_paths.update(
+ conf.env['INCLUDES_' + nameify(args['uselib_store'])])
def normpath(path):
if sys.platform == 'win32':
@@ -195,10 +224,6 @@ def normpath(path):
return os.path.normpath(path)
def configure(conf):
- global g_step
- if g_step > 1:
- return
-
def append_cxx_flags(flags):
conf.env.append_value('CFLAGS', flags)
conf.env.append_value('CXXFLAGS', flags)
@@ -344,11 +369,9 @@ def configure(conf):
conf.env.prepend_value('CFLAGS', '-I' + os.path.abspath('.'))
conf.env.prepend_value('CXXFLAGS', '-I' + os.path.abspath('.'))
- g_step = 2
def display_summary(conf, msgs=None):
- global g_is_child
- if not g_is_child:
+ if len(conf.stack_path) == 1:
display_msg(conf, "Install prefix", conf.env['PREFIX'])
if 'COMPILER_CC' in conf.env:
display_msg(conf, "C Flags", ' '.join(conf.env['CFLAGS']))
@@ -463,25 +486,17 @@ def set_lib_env(conf, name, version):
major_ver = version.split('.')[0]
pkg_var_name = 'PKG_' + name.replace('-', '_') + '_' + major_ver
lib_name = '%s-%s' % (name, major_ver)
+ lib_path = [str(conf.path.get_bld())]
if conf.env.PARDEBUG:
lib_name += 'D'
conf.env[pkg_var_name] = lib_name
conf.env['INCLUDES_' + NAME] = ['${INCLUDEDIR}/%s-%s' % (name, major_ver)]
- conf.env['LIBPATH_' + NAME] = [conf.env.LIBDIR]
+ conf.env['LIBPATH_' + NAME] = lib_path
conf.env['LIB_' + NAME] = [lib_name]
+ conf.run_env.append_unique(lib_path_name, lib_path)
conf.define(NAME + '_VERSION', version)
-def set_line_just(conf, width):
- global line_just
- line_just = max(line_just, width)
- conf.line_just = line_just
-
-def display_header(title):
- global g_is_child
- if g_is_child:
- Logs.pprint('BOLD', title)
-
def display_msg(conf, msg, status=None, color=None):
color = 'CYAN'
if type(status) == bool and status:
@@ -613,7 +628,8 @@ def make_simple_dox(name):
os.chdir(top)
except Exception as e:
Logs.error("Failed to fix up %s documentation: %s" % (name, e))
-
+ finally:
+ os.chdir(top)
def build_dox(bld, name, version, srcdir, blddir, outdir='', versioned=True):
"""Build Doxygen API documentation"""
@@ -744,18 +760,76 @@ def build_i18n(bld, srcdir, dir, name, sources, copyright_holder=None):
build_i18n_po(bld, srcdir, dir, name, sources, copyright_holder)
build_i18n_mo(bld, srcdir, dir, name, sources, copyright_holder)
-def cd_to_build_dir(ctx, appname):
- top_level = (len(ctx.stack_path) > 1)
- if top_level:
- os.chdir(os.path.join('build', appname))
- else:
- os.chdir('build')
+class ExecutionEnvironment:
+ """Context that sets system environment variables for program execution"""
+ def __init__(self, changes):
+ self.original_environ = os.environ.copy()
-def cd_to_orig_dir(ctx, child):
- if child:
- os.chdir(os.path.join('..', '..'))
- else:
- os.chdir('..')
+ self.diff = {}
+ for path_name, paths in changes.items():
+ value = os.pathsep.join(paths)
+ if path_name in os.environ:
+ value += os.pathsep + os.environ[path_name]
+
+ self.diff[path_name] = value
+
+ os.environ.update(self.diff)
+
+ def __str__(self):
+ return '\n'.join({'%s="%s"' % (k, v) for k, v in self.diff.items()})
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ os.environ = self.original_environ
+
+class RunContext(Build.BuildContext):
+ "runs an executable from the build directory"
+ cmd = 'run'
+
+ def execute(self):
+ self.restore()
+ if not self.all_envs:
+ self.load_envs()
+
+ with ExecutionEnvironment(self.env.AUTOWAF_RUN_ENV) as env:
+ if Options.options.verbose:
+ Logs.pprint('GREEN', str(env) + '\n')
+
+ if Options.options.cmd:
+ Logs.pprint('GREEN', 'Running %s' % Options.options.cmd)
+ subprocess.call(Options.options.cmd, shell=True)
+ else:
+ Logs.error("error: Missing --cmd option for run command")
+
+def show_diff(from_lines, to_lines, from_filename, to_filename):
+ import difflib
+ import sys
+
+ for line in difflib.unified_diff(
+ from_lines, to_lines,
+ fromfile=os.path.abspath(from_filename),
+ tofile=os.path.abspath(to_filename)):
+ sys.stderr.write(line)
+
+def test_file_equals(patha, pathb):
+ import filecmp
+ import io
+
+ for path in (patha, pathb):
+ if not os.access(path, os.F_OK):
+ Logs.pprint('RED', 'error: missing file %s' % path)
+ return False
+
+ if filecmp.cmp(patha, pathb, shallow=False):
+ return True
+
+ with io.open(patha, 'rU', encoding='utf-8') as fa:
+ with io.open(pathb, 'rU', encoding='utf-8') as fb:
+ show_diff(fa.readlines(), fb.readlines(), patha, pathb)
+
+ return False
def bench_time():
if hasattr(time, 'perf_counter'): # Added in Python 3.3
@@ -763,207 +837,308 @@ def bench_time():
else:
return time.time()
-def pre_test(ctx, appname, dirs=['src']):
- Logs.pprint('GREEN', '\n[==========] Running %s tests' % appname)
-
- if not hasattr(ctx, 'autowaf_tests_total'):
- ctx.autowaf_tests_start_time = bench_time()
- ctx.autowaf_tests_total = 0
- ctx.autowaf_tests_failed = 0
- ctx.autowaf_local_tests_total = 0
- ctx.autowaf_local_tests_failed = 0
- ctx.autowaf_tests = {}
-
- ctx.autowaf_tests[appname] = {'total': 0, 'failed': 0}
-
- cd_to_build_dir(ctx, appname)
- if not ctx.env.NO_COVERAGE:
- diropts = ''
- for i in dirs:
- diropts += ' -d ' + i
- clear_log = open('lcov-clear.log', 'w')
- try:
- try:
- # Clear coverage data
- subprocess.call(('lcov %s -z' % diropts).split(),
- stdout=clear_log, stderr=clear_log)
- except Exception:
- Logs.warn('Failed to run lcov, no coverage report generated')
- finally:
- clear_log.close()
-
-class TestFailed(Exception):
- pass
-
-def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
- if not ctx.env.NO_COVERAGE:
- diropts = ''
- for i in dirs:
- diropts += ' -d ' + i
- coverage_log = open('lcov-coverage.log', 'w')
- coverage_lcov = open('coverage.lcov', 'w')
- coverage_stripped_lcov = open('coverage-stripped.lcov', 'w')
+class TestOutput:
+ """Test output that is truthy if result is as expected"""
+ def __init__(self, expected, result=None):
+ self.stdout = self.stderr = None
+ self.expected = expected
+ self.result = result
+
+ def __bool__(self):
+ return self.expected is None or self.result == self.expected
+
+ __nonzero__ = __bool__
+
+def is_string(s):
+ if sys.version_info[0] < 3:
+ return isinstance(s, basestring)
+ return isinstance(s, str)
+
+class TestScope:
+ """Scope for running tests that maintains pass/fail statistics"""
+ def __init__(self, tst, name, defaults):
+ self.tst = tst
+ self.name = name
+ self.defaults = defaults
+ self.n_failed = 0
+ self.n_total = 0
+
+ def run(self, test, **kwargs):
+ if callable(test):
+ output = self._run_callable(test, **kwargs)
+ elif type(test) == list:
+ if 'name' not in kwargs:
+ import pipes
+ kwargs['name'] = ' '.join(map(pipes.quote, test))
+
+ output = self._run_command(test, **kwargs)
+ else:
+ raise Exception("Unknown test type")
+
+ if not output:
+ self.tst.log_bad('FAILED', kwargs['name'])
+
+ return self.tst.test_result(output)
+
+ def _run_callable(self, test, **kwargs):
+ expected = kwargs['expected'] if 'expected' in kwargs else True
+ return TestOutput(expected, test())
+
+ def _run_command(self, test, **kwargs):
+ if 'stderr' in kwargs and kwargs['stderr'] == NONEMPTY:
+ # Run with a temp file for stderr and check that it is non-empty
+ import tempfile
+ with tempfile.TemporaryFile(mode='w') as stderr:
+ kwargs['stderr'] = stderr
+ output = self.run(test, **kwargs)
+ return (output if not output else
+ self.run(
+ lambda: stderr.tell() > 0,
+ name=kwargs['name'] + ' error message'))
+
try:
- try:
- base = '.'
- if g_is_child:
- base = '..'
-
- # Generate coverage data
- lcov_cmd = 'lcov -c %s -b %s' % (diropts, base)
- if ctx.env.LLVM_COV:
- lcov_cmd += ' --gcov-tool %s' % ctx.env.LLVM_COV[0]
- subprocess.call(lcov_cmd.split(),
- stdout=coverage_lcov, stderr=coverage_log)
-
- # Strip unwanted stuff
- subprocess.call(
- ['lcov', '--remove', 'coverage.lcov'] + remove,
- stdout=coverage_stripped_lcov, stderr=coverage_log)
-
- # Generate HTML coverage output
- if not os.path.isdir('coverage'):
- os.makedirs('coverage')
- subprocess.call(
- 'genhtml -o coverage coverage-stripped.lcov'.split(),
- stdout=coverage_log, stderr=coverage_log)
-
- except Exception:
- Logs.warn('Failed to run lcov, no coverage report generated')
+ # Run with stdout and stderr set to the appropriate streams
+ out_stream = self._stream('stdout', kwargs)
+ err_stream = self._stream('stderr', kwargs)
+ return self._exec(test, **kwargs)
finally:
- coverage_stripped_lcov.close()
- coverage_lcov.close()
- coverage_log.close()
-
- duration = (bench_time() - ctx.autowaf_tests_start_time) * 1000.0
- total_tests = ctx.autowaf_tests[appname]['total']
- failed_tests = ctx.autowaf_tests[appname]['failed']
- passed_tests = total_tests - failed_tests
- Logs.pprint('GREEN', '\n[==========] %d tests from %s ran (%d ms total)' % (
- total_tests, appname, duration))
- if not ctx.env.NO_COVERAGE:
- Logs.pprint('GREEN', '[----------] Coverage: <file://%s>'
- % os.path.abspath('coverage/index.html'))
-
- Logs.pprint('GREEN', '[ PASSED ] %d tests' % passed_tests)
- if failed_tests > 0:
- Logs.pprint('RED', '[ FAILED ] %d tests' % failed_tests)
- raise TestFailed('Tests from %s failed' % appname)
- Logs.pprint('', '')
-
- top_level = (len(ctx.stack_path) > 1)
- if top_level:
- cd_to_orig_dir(ctx, top_level)
-
-def run_test(ctx,
- appname,
- test,
- desired_status=0,
- dirs=['src'],
- name='',
- header=False,
- quiet=False):
- """Run an individual test.
-
- `test` is either a shell command string, or a list of [name, return status]
- for displaying tests implemented in the calling Python code.
- """
-
- ctx.autowaf_tests_total += 1
- ctx.autowaf_local_tests_total += 1
- ctx.autowaf_tests[appname]['total'] += 1
+ out_stream = out_stream.close() if out_stream else None
+ err_stream = err_stream.close() if err_stream else None
+
+ def _stream(self, stream_name, kwargs):
+ s = kwargs[stream_name] if stream_name in kwargs else None
+ if is_string(s):
+ kwargs[stream_name] = open(s, 'wb')
+ return kwargs[stream_name]
+ return None
+
+ def _exec(self,
+ test,
+ expected=0,
+ name='',
+ stdin=None,
+ stdout=None,
+ stderr=None,
+ verbosity=1):
+ def stream(s):
+ return open(s, 'wb') if type(s) == str else s
+
+ if verbosity > 1:
+ self.tst.log_good('RUN ', name)
- out = (None, None)
- if type(test) == list:
- name = test[0]
- returncode = test[1]
- elif callable(test):
- returncode = test()
- else:
- s = test
- if isinstance(test, type([])):
- s = ' '.join(test)
- if header and not quiet:
- Logs.pprint('Green', '\n[ RUN ] %s' % s)
- cmd = test
if Options.options.test_wrapper:
- cmd = Options.options.test_wrapper + ' ' + test
- if name == '':
- name = test
-
- proc = subprocess.Popen(cmd, shell=True,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out = proc.communicate()
- returncode = proc.returncode
-
- success = desired_status is None or returncode == desired_status
- if success:
- if not quiet:
- Logs.pprint('GREEN', '[ OK ] %s' % name)
- else:
- Logs.pprint('RED', '[ FAILED ] %s' % name)
- ctx.autowaf_tests_failed += 1
- ctx.autowaf_local_tests_failed += 1
- ctx.autowaf_tests[appname]['failed'] += 1
- if type(test) != list and not callable(test):
- Logs.pprint('RED', test)
-
- if Options.options.verbose and type(test) != list and not callable(test):
- sys.stdout.write(out[0].decode('utf-8'))
- sys.stderr.write(out[1].decode('utf-8'))
-
- return (success, out)
-
-def tests_name(ctx, appname, name='*'):
- if name == '*':
- return appname
- else:
- return '%s.%s' % (appname, name)
+ test = [Options.options.test_wrapper] + test
-def begin_tests(ctx, appname, name='*'):
- ctx.autowaf_local_tests_failed = 0
- ctx.autowaf_local_tests_total = 0
- ctx.autowaf_local_tests_start_time = bench_time()
- Logs.pprint('GREEN', '\n[----------] %s' % (
- tests_name(ctx, appname, name)))
+ output = TestOutput(expected)
+ with open(os.devnull, 'wb') as null:
+ out = null if verbosity < 3 and not stdout else stdout
+ err = null if verbosity < 2 and not stderr else stderr
+ proc = subprocess.Popen(test, stdin=stdin, stdout=out, stderr=err)
+ output.stdout, output.stderr = proc.communicate()
+ output.result = proc.returncode
- class Handle:
- def __enter__(self):
- pass
+ if output and verbosity > 0:
+ self.tst.log_good(' OK', name)
- def __exit__(self, type, value, traceback):
- end_tests(ctx, appname, name)
+ return output
- return Handle()
+class TestContext(Build.BuildContext):
+ "runs test suite"
+ fun = cmd = 'test'
-def end_tests(ctx, appname, name='*'):
- duration = (bench_time() - ctx.autowaf_local_tests_start_time) * 1000.0
- total = ctx.autowaf_local_tests_total
- failures = ctx.autowaf_local_tests_failed
- if failures == 0:
- Logs.pprint('GREEN', '[----------] %d tests from %s (%d ms total)' % (
- ctx.autowaf_local_tests_total, tests_name(ctx, appname, name), duration))
- else:
- Logs.pprint('RED', '[----------] %d/%d tests from %s (%d ms total)' % (
- total - failures, total, tests_name(ctx, appname, name), duration))
-
-def run_tests(ctx,
- appname,
- tests,
- desired_status=0,
- dirs=['src'],
- name='*',
- headers=False):
- begin_tests(ctx, appname, name)
-
- diropts = ''
- for i in dirs:
- diropts += ' -d ' + i
-
- for i in tests:
- run_test(ctx, appname, i, desired_status, dirs, i, headers)
-
- end_tests(ctx, appname, name)
+ def __init__(self, **kwargs):
+ super(TestContext, self).__init__(**kwargs)
+ self.start_time = bench_time()
+ self.max_depth = 1
+
+ defaults = {'verbosity': Options.options.verbose}
+ self.stack = [TestScope(self, Context.g_module.APPNAME, defaults)]
+
+ def defaults(self):
+ return self.stack[-1].defaults
+
+ def finalize(self):
+ if self.stack[-1].n_failed > 0:
+ sys.exit(1)
+
+ super(TestContext, self).finalize()
+
+ def __call__(self, test, **kwargs):
+ return self.stack[-1].run(test, **self.args(**kwargs))
+
+ def file_equals(self, from_path, to_path, **kwargs):
+ kwargs.update({'expected': True,
+ 'name': '%s == %s' % (from_path, to_path)})
+ return self(lambda: test_file_equals(from_path, to_path), **kwargs)
+
+ def log_good(self, title, fmt, *args):
+ Logs.pprint('GREEN', '[%s] %s' % (title.center(10), fmt % args))
+
+ def log_bad(self, title, fmt, *args):
+ Logs.pprint('RED', '[%s] %s' % (title.center(10), fmt % args))
+
+ def pre_recurse(self, node):
+ wscript_module = Context.load_module(node.abspath())
+ group_name = wscript_module.APPNAME
+ self.stack.append(TestScope(self, group_name, self.defaults()))
+ self.max_depth = max(self.max_depth, len(self.stack) - 1)
+
+ bld_dir = node.get_bld().parent
+ if bld_dir != self.path.get_bld():
+ Logs.info('')
+
+ self.original_dir = os.getcwd()
+ Logs.info("Waf: Entering directory `%s'\n", bld_dir)
+ os.chdir(str(bld_dir))
+
+ if str(node.parent) == Context.top_dir:
+ self.clear_coverage()
+
+ self.log_good('=' * 10, 'Running %s tests', group_name)
+ super(TestContext, self).pre_recurse(node)
+
+ def test_result(self, success):
+ self.stack[-1].n_total += 1
+ self.stack[-1].n_failed += 1 if not success else 0
+ return success
+
+ def pop(self):
+ scope = self.stack.pop()
+ self.stack[-1].n_total += scope.n_total
+ self.stack[-1].n_failed += scope.n_failed
+ return scope
+
+ def post_recurse(self, node):
+ super(TestContext, self).post_recurse(node)
+
+ scope = self.pop()
+ duration = (bench_time() - self.start_time) * 1000.0
+ is_top = str(node.parent) == str(Context.top_dir)
+
+ if is_top and self.max_depth > 1:
+ Logs.info('')
+
+ self.log_good('=' * 10, '%d tests from %s ran (%d ms total)',
+ scope.n_total, scope.name, duration)
+
+ if not self.env.NO_COVERAGE:
+ if is_top:
+ self.gen_coverage()
+
+ if os.path.exists('coverage/index.html'):
+ self.log_good('COVERAGE', '<file://%s>',
+ os.path.abspath('coverage/index.html'))
+
+ successes = scope.n_total - scope.n_failed
+ Logs.pprint('GREEN', '[ PASSED ] %d tests' % successes)
+ if scope.n_failed > 0:
+ Logs.pprint('RED', '[ FAILED ] %d tests' % scope.n_failed)
+ if is_top:
+ Logs.info("\nWaf: Leaving directory `%s'" % os.getcwd())
+
+ os.chdir(self.original_dir)
+
+ def execute(self):
+ self.restore()
+ if not self.all_envs:
+ self.load_envs()
+
+ if not self.env.BUILD_TESTS:
+ self.fatal('Configuration does not include tests')
+
+ with ExecutionEnvironment(self.env.AUTOWAF_RUN_ENV) as env:
+ if self.defaults()['verbosity'] > 0:
+ Logs.pprint('GREEN', str(env) + '\n')
+ self.recurse([self.run_dir])
+
+ def src_path(self, path):
+ return os.path.relpath(os.path.join(str(self.path), path))
+
+ def args(self, **kwargs):
+ all_kwargs = self.defaults().copy()
+ all_kwargs.update(kwargs)
+ return all_kwargs
+
+ def group(self, name, **kwargs):
+ return TestGroup(
+ self, self.stack[-1].name, name, **self.args(**kwargs))
+
+ def set_test_defaults(self, **kwargs):
+ """Set default arguments to be passed to all tests"""
+ self.stack[-1].defaults.update(kwargs)
+
+ def clear_coverage(self):
+ """Zero old coverage data"""
+ try:
+ with open('cov-clear.log', 'w') as log:
+ subprocess.call(['lcov', '-z', '-d', str(self.path)],
+ stdout=log, stderr=log)
+
+ except Exception:
+ Logs.warn('Failed to run lcov to clear old coverage data')
+
+ def gen_coverage(self):
+ """Generate coverage data and report"""
+ try:
+ with open('cov.lcov', 'w') as out:
+ with open('cov.log', 'w') as err:
+ subprocess.call(['lcov', '-c', '--no-external',
+ '--rc', 'lcov_branch_coverage=1',
+ '-b', '.',
+ '-d', str(self.path)],
+ stdout=out, stderr=err)
+
+ if not os.path.isdir('coverage'):
+ os.makedirs('coverage')
+
+ with open('genhtml.log', 'w') as log:
+ subprocess.call(['genhtml',
+ '-o', 'coverage',
+ '--rc', 'genhtml_branch_coverage=1',
+ 'cov.lcov'],
+ stdout=log, stderr=log)
+
+ except Exception:
+ Logs.warn('Failed to run lcov to generate coverage report')
+
+class TestGroup:
+ def __init__(self, tst, suitename, name, **kwargs):
+ self.tst = tst
+ self.suitename = suitename
+ self.name = name
+ self.kwargs = kwargs
+ self.start_time = bench_time()
+ tst.stack.append(TestScope(tst, name, tst.defaults()))
+
+ def label(self):
+ return self.suitename + '.%s' % self.name if self.name else ''
+
+ def args(self, **kwargs):
+ all_kwargs = self.tst.args(**self.kwargs)
+ all_kwargs.update(kwargs)
+ return all_kwargs
+
+ def __enter__(self):
+ if 'verbosity' in self.kwargs and self.kwargs['verbosity'] > 0:
+ self.tst.log_good('-' * 10, self.label())
+ return self
+
+ def __call__(self, test, **kwargs):
+ return self.tst(test, **self.args(**kwargs))
+
+ def file_equals(self, from_path, to_path, **kwargs):
+ return self.tst.file_equals(from_path, to_path, **kwargs)
+
+ def __exit__(self, type, value, traceback):
+ duration = (bench_time() - self.start_time) * 1000.0
+ scope = self.tst.pop()
+ n_passed = scope.n_total - scope.n_failed
+ if scope.n_failed == 0:
+ self.tst.log_good('-' * 10, '%d tests from %s (%d ms total)',
+ scope.n_total, self.label(), duration)
+ else:
+ self.tst.log_bad('-' * 10, '%d/%d tests from %s (%d ms total)',
+ n_passed, scope.n_total, self.label(), duration)
def run_ldconfig(ctx):
should_run = (ctx.cmd == 'install' and
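The `run` command added above (8280f9de) wraps command execution in ExecutionEnvironment, which prepends the configured library paths to the process environment and restores os.environ on exit. A minimal standalone sketch of that mechanism, assuming autowaf is importable as waflib.extras.autowaf and using a hypothetical build path and command:

    import subprocess

    from waflib.extras.autowaf import ExecutionEnvironment  # assumed import path

    # Hypothetical run environment; in practice this comes from
    # env.AUTOWAF_RUN_ENV, which set_lib_env() populates at configure time.
    run_env = {'LD_LIBRARY_PATH': ['/home/user/serd/build']}

    with ExecutionEnvironment(run_env) as env:
        print(env)  # prints the variables that were prepended
        subprocess.call('./serdi --help', shell=True)  # hypothetical command
    # os.environ is restored when the with-block exits

From a shell, the equivalent is `./waf run --cmd='./serdi --help'`; per RunContext.execute above, omitting --cmd logs an error instead of running anything.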