Diffstat (limited to 'waflib/extras')
-rw-r--r--  waflib/extras/autowaf.py | 42
1 file changed, 36 insertions, 6 deletions
diff --git a/waflib/extras/autowaf.py b/waflib/extras/autowaf.py
index d7799670..5feef036 100644
--- a/waflib/extras/autowaf.py
+++ b/waflib/extras/autowaf.py
@@ -93,6 +93,9 @@ def set_options(opt, debug_by_default=False):
test_opts.add_option('--wrapper', type='string',
dest='test_wrapper',
help='command prefix for tests (e.g. valgrind)')
+ test_opts.add_option('--test-filter', type='string',
+ dest='test_filter',
+ help='regular expression for tests to run')
# Run options
run_opts = opt.add_option_group('Run options')
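Editor's note: waf's option groups wrap Python's optparse, so the new flag behaves like the standalone sketch below (an illustration only, not part of the patch); on the command line it would be passed as something like --test-filter='unit_.*' when invoking the test command.

import optparse

parser = optparse.OptionParser()
group = optparse.OptionGroup(parser, 'Test options')
group.add_option('--test-filter', type='string', dest='test_filter',
                 help='regular expression for tests to run')
parser.add_option_group(group)

opts, args = parser.parse_args(['--test-filter', 'unit_.*'])
print(opts.test_filter)  # prints: unit_.*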
@@ -539,7 +542,7 @@ def build_pc(bld, name, version, version_suffix, libs, subst_dict={}):
"""
pkg_prefix = bld.env['PREFIX']
- if pkg_prefix[-1] == '/':
+ if len(pkg_prefix) > 1 and pkg_prefix[-1] == '/':
pkg_prefix = pkg_prefix[:-1]
target = name.lower()
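Editor's note: the added length check covers the corner case where PREFIX is the root directory '/'; unconditionally stripping the trailing slash there left an empty prefix in the generated pkg-config file. A standalone illustration of the guarded behaviour (function name is illustrative):

def strip_trailing_slash(pkg_prefix):
    # Drop a trailing '/', but leave a bare '/' (root prefix) untouched,
    # so the pkg-config prefix never ends up empty.
    if len(pkg_prefix) > 1 and pkg_prefix[-1] == '/':
        pkg_prefix = pkg_prefix[:-1]
    return pkg_prefix

print(strip_trailing_slash('/usr/local/'))  # /usr/local
print(strip_trailing_slash('/'))            # / (the old code returned '')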
@@ -866,12 +869,26 @@ class TestScope:
self.n_total = 0
def run(self, test, **kwargs):
+ if type(test) == list and 'name' not in kwargs:
+ import pipes
+ kwargs['name'] = ' '.join(map(pipes.quote, test))
+
+ if Options.options.test_filter and 'name' in kwargs:
+ import re
+ found = False
+ for scope in self.tst.stack:
+ if re.search(Options.options.test_filter, scope.name):
+ found = True
+ break
+
+ if (not found and
+ not re.search(Options.options.test_filter, self.name) and
+ not re.search(Options.options.test_filter, kwargs['name'])):
+ return True
+
if callable(test):
output = self._run_callable(test, **kwargs)
elif type(test) == list:
- if 'name' not in kwargs:
- import pipes
- kwargs['name'] = ' '.join(map(pipes.quote, test))
output = self._run_command(test, **kwargs)
else:
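Editor's note: the filtering added above skips a test unless --test-filter matches an enclosing scope's name, the current scope's name, or the test's own name (for list commands the name is now derived from the shell-quoted command). The decision can be exercised in isolation like this (a standalone sketch mirroring the logic, not the autowaf API):

import re

def runs_under_filter(pattern, enclosing_scopes, scope_name, test_name):
    # Any match keeps the test; no match skips it.
    names = list(enclosing_scopes) + [scope_name, test_name]
    return any(re.search(pattern, name) for name in names)

print(runs_under_filter('unit_.*', ['all'], 'unit_math', './test_add 2 2'))  # True
print(runs_under_filter('unit_.*', ['all'], 'integration', './run_server'))  # False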
@@ -994,7 +1011,7 @@ class TestContext(Build.BuildContext):
Logs.info("Waf: Entering directory `%s'\n", bld_dir)
os.chdir(str(bld_dir))
- if str(node.parent) == Context.top_dir:
+ if not self.env.NO_COVERAGE and str(node.parent) == Context.top_dir:
self.clear_coverage()
self.log_good('=' * 10, 'Running %s tests', group_name)
@@ -1029,7 +1046,7 @@ class TestContext(Build.BuildContext):
self.gen_coverage()
if os.path.exists('coverage/index.html'):
- self.log_good('COVERAGE', '<file://%s>',
+ self.log_good('REPORT', '<file://%s>',
os.path.abspath('coverage/index.html'))
successes = scope.n_total - scope.n_failed
@@ -1101,6 +1118,19 @@ class TestContext(Build.BuildContext):
'cov.lcov'],
stdout=log, stderr=log)
+ summary = subprocess.check_output(
+ ['lcov', '--summary',
+ '--rc', 'lcov_branch_coverage=1',
+ 'cov.lcov'],
+ stderr=subprocess.STDOUT).decode('ascii')
+
+ import re
+ lines = re.search('lines\.*: (.*)%.*', summary).group(1)
+ functions = re.search('functions\.*: (.*)%.*', summary).group(1)
+ branches = re.search('branches\.*: (.*)%.*', summary).group(1)
+ self.log_good('COVERAGE', '%s%% lines, %s%% functions, %s%% branches',
+ lines, functions, branches)
+
except Exception:
Logs.warn('Failed to run lcov to generate coverage report')
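Editor's note: for reference, the `lcov --summary` output parsed above looks roughly like the sample below (wording and spacing vary between lcov versions, and the numbers here are made up); the same regular expressions pull out the three percentages:

import re

summary = """Summary coverage rate:
  lines......: 85.2% (1234 of 1448 lines)
  functions..: 90.1% (210 of 233 functions)
  branches...: 71.4% (500 of 700 branches)
"""

lines     = re.search(r'lines\.*: (.*)%.*', summary).group(1)
functions = re.search(r'functions\.*: (.*)%.*', summary).group(1)
branches  = re.search(r'branches\.*: (.*)%.*', summary).group(1)
print(lines, functions, branches)  # 85.2 90.1 71.4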