From 7983a5aae615290d04fd43cbc2752f8cf4a46d10 Mon Sep 17 00:00:00 2001
From: David Robillard
Date: Sat, 12 Jan 2019 20:32:07 +0100
Subject: Squashed 'waflib/' changes from 3e43d09..915dcb1

915dcb1 Report test time correctly
26f0a2b Remove redundant verbose-tests option
fc35c2c Fix MSVC CFLAGS
5ea8f99 Improve test output spacing
0e23b29 Raise exception when test suite fails to ensure non-zero exit status
d6de073 Show run time of unit tests
5b65554 Add short configure option for ultra-strict flags
4687ba6 Use gtest-like test output
258903d Fix failure count in test group summaries
da07e73 Fix verbose tests with Python 3
6e726eb Add support for suppressing warnings from system libraries

git-subtree-dir: waflib
git-subtree-split: 915dcb1778a592ef54a6b7e14fa8a6bd1ca3376c
---
 waflib/extras/autowaf.py | 116 ++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 80 insertions(+), 36 deletions(-)

diff --git a/waflib/extras/autowaf.py b/waflib/extras/autowaf.py
index be3bfe2..92d0e57 100644
--- a/waflib/extras/autowaf.py
+++ b/waflib/extras/autowaf.py
@@ -2,6 +2,7 @@ import glob
 import os
 import subprocess
 import sys
+import time
 
 from waflib import Build, Context, Logs, Options, Utils
 from waflib.TaskGen import feature, before, after
@@ -69,7 +70,7 @@ def set_options(opt, debug_by_default=False, test=False):
     opts.add_option('-s', '--strict', action='store_true', default=False,
                     dest='strict',
                     help="use strict compiler flags and show all warnings")
-    opts.add_option('--ultra-strict', action='store_true', default=False,
+    opts.add_option('-S', '--ultra-strict', action='store_true', default=False,
                     dest='ultra_strict',
                     help="use extremely strict compiler flags (likely noisy)")
     opts.add_option('--docs', action='store_true', default=False, dest='docs',
@@ -86,9 +87,6 @@ def set_options(opt, debug_by_default=False, test=False):
 
         test_opts.add_option('--wrapper', type='string', dest='test_wrapper',
                              help='command prefix for tests (e.g. valgrind)')
-        test_opts.add_option('--verbose-tests', action='store_true',
-                             default=False, dest='verbose_tests',
-                             help='always show test output')
 
     g_step = 1
 
@@ -180,6 +178,16 @@ def check_pkg(conf, name, **args):
         else:
             conf.env[var_name] = CheckType.OPTIONAL
 
+    if not conf.env.MSVC_COMPILER and 'system' in args and args['system']:
+        includes = conf.env['INCLUDES_' + nameify(args['uselib_store'])]
+        for path in includes:
+            if 'COMPILER_CC' in conf.env:
+                conf.env.append_value('CFLAGS', ['-isystem', path])
+            if 'COMPILER_CXX' in conf.env:
+                conf.env.append_value('CXXFLAGS', ['-isystem', path])
+
+        conf.env.append_value('CXXFLAGS', ['-isystem', '/usr/local/include'])
+
 def normpath(path):
     if sys.platform == 'win32':
         return os.path.normpath(path).replace('\\', '/')
@@ -238,16 +246,37 @@ def configure(conf):
             conf.env['CXXFLAGS'] = ['-O0', '-g']
     else:
         if conf.env['MSVC_COMPILER']:
-            conf.env['CFLAGS'] = ['/MD', '/FS', '/DNDEBUG']
-            conf.env['CXXFLAGS'] = ['/MD', '/FS', '/DNDEBUG']
+            append_cxx_flags(['/MD', '/FS', '/DNDEBUG'])
         else:
             append_cxx_flags(['-DNDEBUG'])
 
     if conf.env.MSVC_COMPILER:
         Options.options.no_coverage = True
-        if Options.options.strict:
-            conf.env.append_value('CFLAGS', ['/Wall'])
-            conf.env.append_value('CXXFLAGS', ['/Wall'])
+        append_cxx_flags(['/nologo',
+                          '/FS',
+                          '/DNDEBUG',
+                          '/D_CRT_SECURE_NO_WARNINGS',
+                          '/experimental:external',
+                          '/external:W0',
+                          '/external:anglebrackets'])
+        conf.env.append_value('LINKFLAGS', '/nologo')
+        if Options.options.strict or Options.options.ultra_strict:
+            ms_strict_flags = ['/Wall',
+                               '/wd4061',
+                               '/wd4200',
+                               '/wd4514',
+                               '/wd4571',
+                               '/wd4625',
+                               '/wd4626',
+                               '/wd4706',
+                               '/wd4710',
+                               '/wd4820',
+                               '/wd5026',
+                               '/wd5027',
+                               '/wd5045']
+            conf.env.append_value('CFLAGS', ms_strict_flags)
+            conf.env.append_value('CXXFLAGS', ms_strict_flags)
+            conf.env.append_value('CXXFLAGS', ['/EHsc'])
     else:
         if Options.options.ultra_strict:
             Options.options.strict = True
@@ -335,7 +364,7 @@ def set_c_lang(conf, lang):
     "Set a specific C language standard, like 'c99' or 'c11'"
     if conf.env.MSVC_COMPILER:
         # MSVC has no hope or desire to compile C99, just compile as C++
-        conf.env.append_unique('CFLAGS', ['-TP'])
+        conf.env.append_unique('CFLAGS', ['/TP'])
     else:
         flag = '-std=%s' % lang
         conf.check(cflags=['-Werror', flag],
@@ -359,7 +388,7 @@ def set_modern_c_flags(conf):
     if 'COMPILER_CC' in conf.env:
         if conf.env.MSVC_COMPILER:
             # MSVC has no hope or desire to compile C99, just compile as C++
-            conf.env.append_unique('CFLAGS', ['/TP'])
+            conf.env.append_unique('CFLAGS', ['/TP'])
         else:
             for flag in ['-std=c11', '-std=c99']:
                 if conf.check(cflags=['-Werror', flag], mandatory=False,
@@ -721,8 +750,6 @@ def cd_to_build_dir(ctx, appname):
         os.chdir(os.path.join('build', appname))
     else:
         os.chdir('build')
-    Logs.pprint('GREEN', ("Waf: Entering directory `%s'" %
-                          os.path.abspath(os.getcwd())))
 
 def cd_to_orig_dir(ctx, child):
     if child:
@@ -730,8 +757,17 @@ def cd_to_orig_dir(ctx, child):
     else:
         os.chdir('..')
 
+def bench_time():
+    if hasattr(time, 'perf_counter'):  # Added in Python 3.3
+        return time.perf_counter()
+    else:
+        return time.time()
+
 def pre_test(ctx, appname, dirs=['src']):
+    Logs.pprint('GREEN', '\n[==========] Running %s tests' % appname)
+
     if not hasattr(ctx, 'autowaf_tests_total'):
+        ctx.autowaf_tests_start_time = bench_time()
         ctx.autowaf_tests_total = 0
         ctx.autowaf_tests_failed = 0
         ctx.autowaf_local_tests_total = 0
@@ -756,6 +792,9 @@ def pre_test(ctx, appname, dirs=['src']):
         finally:
             clear_log.close()
 
+class TestFailed(Exception):
+    pass
+
 def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
     if not ctx.env.NO_COVERAGE:
         diropts = ''
@@ -796,21 +835,22 @@ def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
             coverage_lcov.close()
             coverage_log.close()
 
-    if ctx.autowaf_tests[appname]['failed'] > 0:
-        Logs.pprint('RED', '\nSummary: %d / %d %s tests failed' % (
-            ctx.autowaf_tests[appname]['failed'],
-            ctx.autowaf_tests[appname]['total'],
-            appname))
-    else:
-        Logs.pprint('GREEN', '\nSummary: All %d %s tests passed' % (
-            ctx.autowaf_tests[appname]['total'], appname))
-
+    duration = (bench_time() - ctx.autowaf_tests_start_time) * 1000.0
+    total_tests = ctx.autowaf_tests[appname]['total']
+    failed_tests = ctx.autowaf_tests[appname]['failed']
+    passed_tests = total_tests - failed_tests
+    Logs.pprint('GREEN', '\n[==========] %d tests from %s ran (%d ms total)' % (
+        total_tests, appname, duration))
     if not ctx.env.NO_COVERAGE:
-        Logs.pprint('GREEN', 'Coverage: <file://%s>\n'
+        Logs.pprint('GREEN', '[----------] Coverage: <file://%s>'
                     % os.path.abspath('coverage/index.html'))
 
-    Logs.pprint('GREEN', ("Waf: Leaving directory `%s'" %
-                          os.path.abspath(os.getcwd())))
+    Logs.pprint('GREEN', '[  PASSED  ] %d tests' % passed_tests)
+    if failed_tests > 0:
+        Logs.pprint('RED', '[  FAILED  ] %d tests' % failed_tests)
+        raise TestFailed('Tests from %s failed' % appname)
+    Logs.pprint('', '')
+
     top_level = (len(ctx.stack_path) > 1)
     if top_level:
         cd_to_orig_dir(ctx, top_level)
@@ -844,7 +884,7 @@ def run_test(ctx,
     if isinstance(test, type([])):
         s = ' '.join(test)
     if header and not quiet:
-        Logs.pprint('Green', '\n** Test %s' % s)
+        Logs.pprint('Green', '\n[ RUN      ] %s' % s)
     cmd = test
     if Options.options.test_wrapper:
         cmd = Options.options.test_wrapper + ' ' + test
@@ -859,17 +899,18 @@ def run_test(ctx,
     success = desired_status is None or returncode == desired_status
     if success:
         if not quiet:
-            Logs.pprint('GREEN', '** Pass %s' % name)
+            Logs.pprint('GREEN', '[       OK ] %s' % name)
     else:
-        Logs.pprint('RED', '** FAIL %s' % name)
+        Logs.pprint('RED', '[  FAILED  ] %s' % name)
         ctx.autowaf_tests_failed += 1
+        ctx.autowaf_local_tests_failed += 1
         ctx.autowaf_tests[appname]['failed'] += 1
         if type(test) != list and not callable(test):
             Logs.pprint('RED', test)
 
-    if Options.options.verbose_tests and type(test) != list and not callable(test):
-        sys.stdout.write(out[0])
-        sys.stderr.write(out[1])
+    if Options.options.verbose and type(test) != list and not callable(test):
+        sys.stdout.write(out[0].decode('utf-8'))
+        sys.stderr.write(out[1].decode('utf-8'))
 
     return (success, out)
 
@@ -882,7 +923,8 @@ def tests_name(ctx, appname, name='*'):
 def begin_tests(ctx, appname, name='*'):
     ctx.autowaf_local_tests_failed = 0
     ctx.autowaf_local_tests_total = 0
-    Logs.pprint('GREEN', '\n** Begin %s tests' % (
+    ctx.autowaf_local_tests_start_time = bench_time()
+    Logs.pprint('GREEN', '\n[----------] %s' % (
         tests_name(ctx, appname, name)))
 
     class Handle:
@@ -895,13 +937,15 @@ def begin_tests(ctx, appname, name='*'):
     return Handle()
 
 def end_tests(ctx, appname, name='*'):
+    duration = (bench_time() - ctx.autowaf_local_tests_start_time) * 1000.0
+    total = ctx.autowaf_local_tests_total
     failures = ctx.autowaf_local_tests_failed
     if failures == 0:
-        Logs.pprint('GREEN', '** Passed all %d %s tests' % (
-            ctx.autowaf_local_tests_total, tests_name(ctx, appname, name)))
+        Logs.pprint('GREEN', '[----------] %d tests from %s (%d ms total)' % (
+            ctx.autowaf_local_tests_total, tests_name(ctx, appname, name), duration))
     else:
-        Logs.pprint('RED', '** Failed %d / %d %s tests' % (
-            failures, ctx.autowaf_local_tests_total, tests_name(ctx, appname, name)))
+        Logs.pprint('RED', '[----------] %d/%d tests from %s (%d ms total)' % (
+            total - failures, total, tests_name(ctx, appname, name), duration))
 
 def run_tests(ctx,
               appname,
--
cgit v1.2.1