From 46ac21b8c413f73ad723499aa76ecea2f59cb57d Mon Sep 17 00:00:00 2001
From: David Robillard <d@drobilla.net>
Date: Sun, 17 Mar 2019 17:31:03 +0100
Subject: Squashed 'waflib/' changes from 915dcb17..e7a29b6b

e7a29b6b Upgrade to waf 2.0.15
8280f9de Add command for running executables from the build directory
8073c1ad Make make_simple_dox() safe in case of exception
70d03b82 Avoid use of global counter hacks for configuration display
b7d689a4 Rewrite test framework
94deadf0 Automatically add options and move add_flags() to options context
f4259ee4 Reduce system include path noise
927b6082 Automatically display configuration header
c44b8f3b Set line justification from a constant in the wscript
a48e26fd Automatically detect if wscript has a test hook
ef66724d Save runtime variables in the environment
63bcbcd3 Clean up TestContext
b1d95050 Add ExecutionContext for setting runtime environment
387c1dfa Add show_diff() and test_file_equals() utilities
29d4d293 Fix in-tree library paths
9fde01f4 Add custom configuration context
6d3612fd Add lib_path_name constant

git-subtree-dir: waflib
git-subtree-split: e7a29b6b9b2f842314244c23c14d8f8f560904e1
---
 Build.py                 |  11 +-
 ConfigSet.py             |   4 +-
 Configure.py             |   3 +-
 Context.py               |  14 +-
 Logs.py                  |   4 +-
 Runner.py                |   8 +-
 Scripting.py             |  10 +-
 Task.py                  |  58 ++--
 TaskGen.py               |   4 +-
 Tools/c_config.py        |   4 +-
 Tools/c_preproc.py       |   6 +-
 Tools/ccroot.py          |  19 +-
 Tools/d_scan.py          |   8 +-
 Tools/fc.py              |  22 +-
 Tools/fc_config.py       |   4 +-
 Tools/fc_scan.py         |  12 +-
 Tools/ifort.py           |   2 +-
 Tools/javaw.py           | 131 ++++++++-
 Tools/md5_tstamp.py      |   3 +-
 Tools/msvc.py            |  16 +-
 Tools/python.py          |   4 +
 Tools/qt5.py             |   8 +-
 Tools/waf_unit_test.py   |   2 +-
 Tools/winres.py          |   4 +-
 Utils.py                 |  18 +-
 ansiterm.py              |   2 +-
 extras/autowaf.py        | 681 +++++++++++++++++++++++++++++------------------
 extras/buildcopy.py      |   7 +-
 extras/cpplint.py        |   6 +-
 extras/cython.py         |  13 +-
 extras/distnet.py        |   2 +-
 extras/erlang.py         |   2 +-
 extras/fast_partial.py   |   2 +-
 extras/fc_nfort.py       |  52 ++++
 extras/gccdeps.py        |   6 +-
 extras/kde4.py           |   2 +-
 extras/lv2.py            |  32 ++-
 extras/ocaml.py          |   2 +-
 extras/parallel_debug.py |   9 +-
 extras/pgicc.py          |   2 +-
 extras/protoc.py         |  92 +++----
 extras/pyqt5.py          |   4 +-
 extras/qt4.py            |   6 +-
 extras/remote.py         |   2 +-
 extras/run_do_script.py  |   2 +-
 extras/swig.py           |   4 +-
 46 files changed, 871 insertions(+), 438 deletions(-)
 create mode 100644 extras/fc_nfort.py

diff --git a/Build.py b/Build.py
index 1afcba64..8143dbcc 100644
--- a/Build.py
+++ b/Build.py
@@ -104,7 +104,7 @@ class BuildContext(Context.Context):
 		"""Amount of jobs to run in parallel"""
 
 		self.targets = Options.options.targets
-		"""List of targets to build (default: \*)"""
+		"""List of targets to build (default: \\*)"""
 
 		self.keep = Options.options.keep
 		"""Whether the build should continue past errors"""
@@ -1055,12 +1055,16 @@ class inst(Task.Task):
 		"""
 		Returns the destination path where files will be installed, pre-pending `destdir`.
 
+		Relative paths will be interpreted relative to `PREFIX` if no `destdir` is given.
+
 		:rtype: string
 		"""
 		if isinstance(self.install_to, Node.Node):
 			dest = self.install_to.abspath()
 		else:
-			dest = Utils.subst_vars(self.install_to, self.env)
+			dest = os.path.normpath(Utils.subst_vars(self.install_to, self.env))
+		if not os.path.isabs(dest):
+			dest = os.path.join(self.env.PREFIX, dest)
 		if destdir and Options.options.destdir:
 			dest = os.path.join(Options.options.destdir, os.path.splitdrive(dest)[1].lstrip(os.sep))
 		return dest
@@ -1314,7 +1318,8 @@ class CleanContext(BuildContext):
 			lst = []
 			for env in self.all_envs.values():
 				lst.extend(self.root.find_or_declare(f) for f in env[CFG_FILES])
-			for n in self.bldnode.ant_glob('**/*', excl='.lock* *conf_check_*/** config.log c4che/*', quiet=True):
+			excluded_dirs = '.lock* *conf_check_*/** config.log %s/*' % CACHE_DIR
+			for n in self.bldnode.ant_glob('**/*', excl=excluded_dirs, quiet=True):
 				if n in lst:
 					continue
 				n.delete()
diff --git a/ConfigSet.py b/ConfigSet.py
index b300bb56..901fba6c 100644
--- a/ConfigSet.py
+++ b/ConfigSet.py
@@ -11,7 +11,7 @@ The values put in :py:class:`ConfigSet` must be serializable (dicts, lists, stri
 
 import copy, re, os
 from waflib import Logs, Utils
-re_imp = re.compile('^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
+re_imp = re.compile(r'^(#)*?([^#=]*?)\ =\ (.*?)$', re.M)
 
 class ConfigSet(object):
 	"""
@@ -312,7 +312,7 @@ class ConfigSet(object):
 		:type filename: string
 		"""
 		tbl = self.table
-		code = Utils.readf(filename, m='rU')
+		code = Utils.readf(filename, m='r')
 		for m in re_imp.finditer(code):
 			g = m.group
 			tbl[g(2)] = eval(g(3))
diff --git a/Configure.py b/Configure.py
index d0a4793a..db09c0e3 100644
--- a/Configure.py
+++ b/Configure.py
@@ -125,7 +125,7 @@ class ConfigurationContext(Context.Context):
 		self.bldnode.mkdir()
 
 		if not os.path.isdir(self.bldnode.abspath()):
-			conf.fatal('Could not create the build directory %s' % self.bldnode.abspath())
+			self.fatal('Could not create the build directory %s' % self.bldnode.abspath())
 
 	def execute(self):
 		"""
@@ -180,6 +180,7 @@ class ConfigurationContext(Context.Context):
 		env.hash = self.hash
 		env.files = self.files
 		env.environ = dict(self.environ)
+		env.launch_dir = Context.launch_dir
 
 		if not (self.env.NO_LOCK_IN_RUN or env.environ.get('NO_LOCK_IN_RUN') or getattr(Options.options, 'no_lock_in_run')):
 			env.store(os.path.join(Context.run_dir, Options.lockfile))
diff --git a/Context.py b/Context.py
index bb47c921..876ea46b 100644
--- a/Context.py
+++ b/Context.py
@@ -11,13 +11,13 @@ from waflib import Utils, Errors, Logs
 import waflib.Node
 
 # the following 3 constants are updated on each new release (do not touch)
-HEXVERSION=0x2000b00
+HEXVERSION=0x2000f00
 """Constant updated on new releases"""
 
-WAFVERSION="2.0.11"
+WAFVERSION="2.0.15"
 """Constant updated on new releases"""
 
-WAFREVISION="a97f6fb0941091b4966b625f15ec32fa783a8bec"
+WAFREVISION="503db290b73ef738a495e0d116d6f8ee0b98dcc2"
 """Git revision when the waf version is updated"""
 
 ABI = 20
@@ -266,7 +266,7 @@ class Context(ctx):
 				cache[node] = True
 				self.pre_recurse(node)
 				try:
-					function_code = node.read('rU', encoding)
+					function_code = node.read('r', encoding)
 					exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict)
 				finally:
 					self.post_recurse(node)
@@ -613,7 +613,7 @@ class Context(ctx):
 		is typically called once for a programming language group, see for
 		example :py:mod:`waflib.Tools.compiler_c`
 
-		:param var: glob expression, for example 'cxx\_\*.py'
+		:param var: glob expression, for example 'cxx\\_\\*.py'
 		:type var: string
 		:param ban: list of exact file names to exclude
 		:type ban: list of string
@@ -662,7 +662,7 @@ def load_module(path, encoding=None):
 
 	module = imp.new_module(WSCRIPT_FILE)
 	try:
-		code = Utils.readf(path, m='rU', encoding=encoding)
+		code = Utils.readf(path, m='r', encoding=encoding)
 	except EnvironmentError:
 		raise Errors.WafError('Could not read the file %r' % path)
 
@@ -678,7 +678,7 @@ def load_module(path, encoding=None):
 
 def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True):
 	"""
-	Importx a Waf tool as a python module, and stores it in the dict :py:const:`waflib.Context.Context.tools`
+	Imports a Waf tool as a python module, and stores it in the dict :py:const:`waflib.Context.Context.tools`
 
 	:type  tool: string
 	:param tool: Name of the tool
diff --git a/Logs.py b/Logs.py
index 2a475169..11dc34f3 100644
--- a/Logs.py
+++ b/Logs.py
@@ -276,9 +276,9 @@ def error(*k, **kw):
 
 def warn(*k, **kw):
 	"""
-	Wraps logging.warn
+	Wraps logging.warning
 	"""
-	log.warn(*k, **kw)
+	log.warning(*k, **kw)
 
 def info(*k, **kw):
 	"""
diff --git a/Runner.py b/Runner.py
index 261084d2..5d276698 100644
--- a/Runner.py
+++ b/Runner.py
@@ -37,6 +37,8 @@ class PriorityTasks(object):
 		return len(self.lst)
 	def __iter__(self):
 		return iter(self.lst)
+	def __str__(self):
+		return 'PriorityTasks: [%s]' % '\n  '.join(str(x) for x in self.lst)
 	def clear(self):
 		self.lst = []
 	def append(self, task):
@@ -181,10 +183,12 @@ class Parallel(object):
 		The reverse dependency graph of dependencies obtained from Task.run_after
 		"""
 
-		self.spawner = Spawner(self)
+		self.spawner = None
 		"""
 		Coordinating daemon thread that spawns thread consumers
 		"""
+		if self.numjobs > 1:
+			self.spawner = Spawner(self)
 
 	def get_next_task(self):
 		"""
@@ -254,6 +258,8 @@ class Parallel(object):
 							self.outstanding.append(x)
 							break
 					else:
+						if self.stop or self.error:
+							break
 						raise Errors.WafError('Broken revdeps detected on %r' % self.incomplete)
 				else:
 					tasks = next(self.biter)
diff --git a/Scripting.py b/Scripting.py
index 749d4f2e..ae17a8b4 100644
--- a/Scripting.py
+++ b/Scripting.py
@@ -216,7 +216,10 @@ def parse_options():
 	ctx = Context.create_context('options')
 	ctx.execute()
 	if not Options.commands:
-		Options.commands.append(default_cmd)
+		if isinstance(default_cmd, list):
+			Options.commands.extend(default_cmd)
+		else:
+			Options.commands.append(default_cmd)
 	if Options.options.whelp:
 		ctx.parser.print_help()
 		sys.exit(0)
@@ -280,7 +283,7 @@ def distclean_dir(dirname):
 			pass
 
 	try:
-		shutil.rmtree('c4che')
+		shutil.rmtree(Build.CACHE_DIR)
 	except OSError:
 		pass
 
@@ -598,12 +601,15 @@ def autoconfigure(execute_method):
 			cmd = env.config_cmd or 'configure'
 			if Configure.autoconfig == 'clobber':
 				tmp = Options.options.__dict__
+				launch_dir_tmp = Context.launch_dir
 				if env.options:
 					Options.options.__dict__ = env.options
+				Context.launch_dir = env.launch_dir
 				try:
 					run_command(cmd)
 				finally:
 					Options.options.__dict__ = tmp
+					Context.launch_dir = launch_dir_tmp
 			else:
 				run_command(cmd)
 			run_command(self.cmd)
diff --git a/Task.py b/Task.py
index 0fc449d4..cb49a739 100644
--- a/Task.py
+++ b/Task.py
@@ -50,6 +50,9 @@ def f(tsk):
 	bld = gen.bld
 	cwdx = tsk.get_cwd()
 	p = env.get_flat
+	def to_list(xx):
+		if isinstance(xx, str): return [xx]
+		return xx
 	tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s
 	return tsk.exec_command(cmd, cwd=cwdx, env=env.env or None)
 '''
@@ -77,7 +80,8 @@ def f(tsk):
 
 COMPILE_TEMPLATE_SIG_VARS = '''
 def f(tsk):
-	super(tsk.__class__, tsk).sig_vars()
+	sig = tsk.generator.bld.hash_env_vars(tsk.env, tsk.vars)
+	tsk.m.update(sig)
 	env = tsk.env
 	gen = tsk.generator
 	bld = gen.bld
@@ -159,10 +163,10 @@ class Task(evil):
 	"""File extensions that objects of this task class may create"""
 
 	before = []
-	"""List of task class names to execute before instances of this class"""
+	"""The instances of this class are executed before the instances of classes whose names are in this list"""
 
 	after = []
-	"""List of task class names to execute after instances of this class"""
+	"""The instances of this class are executed after the instances of classes whose names are in this list"""
 
 	hcode = Utils.SIG_NIL
 	"""String representing an additional hash for the class representation"""
@@ -302,25 +306,31 @@ class Task(evil):
 		if hasattr(self, 'stderr'):
 			kw['stderr'] = self.stderr
 
-		# workaround for command line length limit:
-		# http://support.microsoft.com/kb/830473
-		if not isinstance(cmd, str) and (len(repr(cmd)) >= 8192 if Utils.is_win32 else len(cmd) > 200000):
-			cmd, args = self.split_argfile(cmd)
-			try:
-				(fd, tmp) = tempfile.mkstemp()
-				os.write(fd, '\r\n'.join(args).encode())
-				os.close(fd)
-				if Logs.verbose:
-					Logs.debug('argfile: @%r -> %r', tmp, args)
-				return self.generator.bld.exec_command(cmd + ['@' + tmp], **kw)
-			finally:
+		if not isinstance(cmd, str):
+			if Utils.is_win32:
+				# win32 compares the resulting length http://support.microsoft.com/kb/830473
+				too_long = sum([len(arg) for arg in cmd]) + len(cmd) > 8192
+			else:
+				# non-win32 counts the amount of arguments (200k)
+				too_long = len(cmd) > 200000
+
+			if too_long and getattr(self, 'allow_argsfile', True):
+				# Shunt arguments to a temporary file if the command is too long.
+				cmd, args = self.split_argfile(cmd)
 				try:
-					os.remove(tmp)
-				except OSError:
-					# anti-virus and indexers can keep files open -_-
-					pass
-		else:
-			return self.generator.bld.exec_command(cmd, **kw)
+					(fd, tmp) = tempfile.mkstemp()
+					os.write(fd, '\r\n'.join(args).encode())
+					os.close(fd)
+					if Logs.verbose:
+						Logs.debug('argfile: @%r -> %r', tmp, args)
+					return self.generator.bld.exec_command(cmd + ['@' + tmp], **kw)
+				finally:
+					try:
+						os.remove(tmp)
+					except OSError:
+						# anti-virus and indexers can keep files open -_-
+						pass
+		return self.generator.bld.exec_command(cmd, **kw)
 
 	def process(self):
 		"""
@@ -776,6 +786,8 @@ class Task(evil):
 		Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.env` variables/values
 		When overriding this method, and if scriptlet expressions are used, make sure to follow
 		the code in :py:meth:`waflib.Task.Task.compile_sig_vars` to enable dependencies on scriptlet results.
+
+		This method may be replaced on subclasses by the metaclass to force dependencies on scriptlet code.
 		"""
 		sig = self.generator.bld.hash_env_vars(self.env, self.vars)
 		self.m.update(sig)
@@ -1038,7 +1050,7 @@ def funex(c):
 	exec(c, dc)
 	return dc['f']
 
-re_cond = re.compile('(?P<var>\w+)|(?P<or>\|)|(?P<and>&)')
+re_cond = re.compile(r'(?P<var>\w+)|(?P<or>\|)|(?P<and>&)')
 re_novar = re.compile(r'^(SRC|TGT)\W+.*?$')
 reg_act = re.compile(r'(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})', re.M)
 def compile_fun_shell(line):
@@ -1193,7 +1205,7 @@ def compile_fun_noshell(line):
 					# plain code such as ${tsk.inputs[0].abspath()}
 					call = '%s%s' % (var, code)
 					add_dvar(call)
-					app('gen.to_list(%s)' % call)
+					app('to_list(%s)' % call)
 			else:
 				# a plain variable such as # a plain variable like ${AR}
 				app('to_list(env[%r])' % var)
diff --git a/TaskGen.py b/TaskGen.py
index a74e6431..532b7d5c 100644
--- a/TaskGen.py
+++ b/TaskGen.py
@@ -74,7 +74,7 @@ class task_gen(object):
 		else:
 			self.bld = kw['bld']
 			self.env = self.bld.env.derive()
-			self.path = self.bld.path # emulate chdir when reading scripts
+			self.path = kw.get('path', self.bld.path) # by default, emulate chdir when reading scripts
 
 			# Provide a unique index per folder
 			# This is part of a measure to prevent output file name collisions
@@ -727,7 +727,7 @@ def sequence_order(self):
 	self.bld.prev = self
 
 
-re_m4 = re.compile('@(\w+)@', re.M)
+re_m4 = re.compile(r'@(\w+)@', re.M)
 
 class subst_pc(Task.Task):
 	"""
diff --git a/Tools/c_config.py b/Tools/c_config.py
index d2b3c0d8..d546be95 100644
--- a/Tools/c_config.py
+++ b/Tools/c_config.py
@@ -250,9 +250,9 @@ def exec_cfg(self, kw):
 	:type atleast_pkgconfig_version: string
 	:param package: package name, for example *gtk+-2.0*
 	:type package: string
-	:param uselib_store: if the test is successful, define HAVE\_*name*. It is also used to define *conf.env.FLAGS_name* variables.
+	:param uselib_store: if the test is successful, define HAVE\\_*name*. It is also used to define *conf.env.FLAGS_name* variables.
 	:type uselib_store: string
-	:param modversion: if provided, return the version of the given module and define *name*\_VERSION
+	:param modversion: if provided, return the version of the given module and define *name*\\_VERSION
 	:type modversion: string
 	:param args: arguments to give to *package* when retrieving flags
 	:type args: list of string
diff --git a/Tools/c_preproc.py b/Tools/c_preproc.py
index 7e04b4a7..68e5f5ae 100644
--- a/Tools/c_preproc.py
+++ b/Tools/c_preproc.py
@@ -75,13 +75,13 @@ re_lines = re.compile(
 	re.IGNORECASE | re.MULTILINE)
 """Match #include lines"""
 
-re_mac = re.compile("^[a-zA-Z_]\w*")
+re_mac = re.compile(r"^[a-zA-Z_]\w*")
 """Match macro definitions"""
 
 re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
 """Match macro functions"""
 
-re_pragma_once = re.compile('^\s*once\s*', re.IGNORECASE)
+re_pragma_once = re.compile(r'^\s*once\s*', re.IGNORECASE)
 """Match #pragma once statements"""
 
 re_nl = re.compile('\\\\\r*\n', re.MULTILINE)
@@ -660,7 +660,7 @@ def extract_macro(txt):
 			# empty define, assign an empty token
 			return (v, [[], [('T','')]])
 
-re_include = re.compile('^\s*(<(?:.*)>|"(?:.*)")')
+re_include = re.compile(r'^\s*(<(?:.*)>|"(?:.*)")')
 def extract_include(txt, defs):
 	"""
 	Process a line in the form::
diff --git a/Tools/ccroot.py b/Tools/ccroot.py
index cfef8bf5..579d5b2b 100644
--- a/Tools/ccroot.py
+++ b/Tools/ccroot.py
@@ -111,7 +111,7 @@ def apply_incpaths(self):
 		tg = bld(features='includes', includes='.')
 
 	The folders only need to be relative to the current directory, the equivalent build directory is
-	added automatically (for headers created in the build directory). This enable using a build directory
+	added automatically (for headers created in the build directory). This enables using a build directory
 	or not (``top == out``).
 
 	This method will add a list of nodes read by :py:func:`waflib.Tools.ccroot.to_incnodes` in ``tg.env.INCPATHS``,
@@ -238,6 +238,17 @@ def rm_tgt(cls):
 	setattr(cls, 'run', wrap)
 rm_tgt(stlink_task)
 
+@feature('skip_stlib_link_deps')
+@before_method('process_use')
+def apply_skip_stlib_link_deps(self):
+	"""
+	This enables an optimization in the :py:func:`waflib.Tools.ccroot.process_use` method that skips dependency and
+	link flag optimizations for targets that generate static libraries (via the :py:class:`waflib.Tools.ccroot.stlink_task` task).
+	The actual behavior is implemented in the :py:func:`waflib.Tools.ccroot.process_use` method, so this feature only tells waf
+	to enable the new behavior.
+	"""
+	self.env.SKIP_STLIB_LINK_DEPS = True
+
 @feature('c', 'cxx', 'd', 'fc', 'asm')
 @after_method('process_source')
 def apply_link(self):
@@ -386,7 +397,11 @@ def process_use(self):
 		y = self.bld.get_tgen_by_name(x)
 		var = y.tmp_use_var
 		if var and link_task:
-			if var == 'LIB' or y.tmp_use_stlib or x in names:
+			if self.env.SKIP_STLIB_LINK_DEPS and isinstance(link_task, stlink_task):
+				# If the skip_stlib_link_deps feature is enabled then we should
+				# avoid adding lib deps to the stlink_task instance.
+				pass
+			elif var == 'LIB' or y.tmp_use_stlib or x in names:
 				self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]])
 				self.link_task.dep_nodes.extend(y.link_task.outputs)
 				tmp_path = y.link_task.outputs[0].parent.path_from(self.get_cwd())
diff --git a/Tools/d_scan.py b/Tools/d_scan.py
index 14c6c313..4e807a6b 100644
--- a/Tools/d_scan.py
+++ b/Tools/d_scan.py
@@ -93,8 +93,8 @@ class d_parser(object):
 
 		self.allnames = []
 
-		self.re_module = re.compile("module\s+([^;]+)")
-		self.re_import = re.compile("import\s+([^;]+)")
+		self.re_module = re.compile(r"module\s+([^;]+)")
+		self.re_import = re.compile(r"import\s+([^;]+)")
 		self.re_import_bindings = re.compile("([^:]+):(.*)")
 		self.re_import_alias = re.compile("[^=]+=(.+)")
 
@@ -138,7 +138,7 @@ class d_parser(object):
 
 		mod_name = self.re_module.search(code)
 		if mod_name:
-			self.module = re.sub('\s+', '', mod_name.group(1)) # strip all whitespaces
+			self.module = re.sub(r'\s+', '', mod_name.group(1)) # strip all whitespaces
 
 		# go through the code, have a look at all import occurrences
 
@@ -146,7 +146,7 @@ class d_parser(object):
 		import_iterator = self.re_import.finditer(code)
 		if import_iterator:
 			for import_match in import_iterator:
-				import_match_str = re.sub('\s+', '', import_match.group(1)) # strip all whitespaces
+				import_match_str = re.sub(r'\s+', '', import_match.group(1)) # strip all whitespaces
 
 				# does this end with an import bindings declaration?
 				# (import bindings always terminate the list of imports)
diff --git a/Tools/fc.py b/Tools/fc.py
index d9e8d8c4..fd4d39c9 100644
--- a/Tools/fc.py
+++ b/Tools/fc.py
@@ -28,10 +28,24 @@ def modfile(conf, name):
 	Turns a module name into the right module file name.
 	Defaults to all lower case.
 	"""
-	return {'lower'     :name.lower() + '.mod',
-		'lower.MOD' :name.lower() + '.MOD',
-		'UPPER.mod' :name.upper() + '.mod',
-		'UPPER'     :name.upper() + '.MOD'}[conf.env.FC_MOD_CAPITALIZATION or 'lower']
+	if name.find(':') >= 0:
+		# Depending on a submodule!
+		separator = conf.env.FC_SUBMOD_SEPARATOR or '@'
+		# Ancestors of the submodule will be prefixed to the
+		# submodule name, separated by a colon.
+		modpath = name.split(':')
+		# Only the ancestor (actual) module and the submodule name
+		# will be used for the filename.
+		modname = modpath[0] + separator + modpath[-1]
+		suffix = conf.env.FC_SUBMOD_SUFFIX or '.smod'
+	else:
+		modname = name
+		suffix = '.mod'
+
+	return {'lower'     :modname.lower() + suffix.lower(),
+		'lower.MOD' :modname.lower() + suffix.upper(),
+		'UPPER.mod' :modname.upper() + suffix.lower(),
+		'UPPER'     :modname.upper() + suffix.upper()}[conf.env.FC_MOD_CAPITALIZATION or 'lower']
 
 def get_fortran_tasks(tsk):
 	"""
diff --git a/Tools/fc_config.py b/Tools/fc_config.py
index 222f3a55..dc5e5c9e 100644
--- a/Tools/fc_config.py
+++ b/Tools/fc_config.py
@@ -178,8 +178,8 @@ def check_fortran_dummy_main(self, *k, **kw):
 # ------------------------------------------------------------------------
 
 GCC_DRIVER_LINE = re.compile('^Driving:')
-POSIX_STATIC_EXT = re.compile('\S+\.a')
-POSIX_LIB_FLAGS = re.compile('-l\S+')
+POSIX_STATIC_EXT = re.compile(r'\S+\.a')
+POSIX_LIB_FLAGS = re.compile(r'-l\S+')
 
 @conf
 def is_link_verbose(self, txt):
diff --git a/Tools/fc_scan.py b/Tools/fc_scan.py
index 12cb0fc0..0824c92b 100644
--- a/Tools/fc_scan.py
+++ b/Tools/fc_scan.py
@@ -5,13 +5,15 @@
 
 import re
 
-INC_REGEX = """(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
-USE_REGEX = """(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
-MOD_REGEX = """(?:^|;)\s*MODULE(?!\s*PROCEDURE)(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
+INC_REGEX = r"""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
+USE_REGEX = r"""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"""
+MOD_REGEX = r"""(?:^|;)\s*MODULE(?!\s+(?:PROCEDURE|SUBROUTINE|FUNCTION))\s+(\w+)"""
+SMD_REGEX = r"""(?:^|;)\s*SUBMODULE\s*\(([\w:]+)\)\s*(\w+)"""
 
 re_inc = re.compile(INC_REGEX, re.I)
 re_use = re.compile(USE_REGEX, re.I)
 re_mod = re.compile(MOD_REGEX, re.I)
+re_smd = re.compile(SMD_REGEX, re.I)
 
 class fortran_parser(object):
 	"""
@@ -58,6 +60,10 @@ class fortran_parser(object):
 			m = re_mod.search(line)
 			if m:
 				mods.append(m.group(1))
+			m = re_smd.search(line)
+			if m:
+				uses.append(m.group(1))
+				mods.append('{0}:{1}'.format(m.group(1),m.group(2)))
 		return (incs, uses, mods)
 
 	def start(self, node):
diff --git a/Tools/ifort.py b/Tools/ifort.py
index 74934f3f..17d30529 100644
--- a/Tools/ifort.py
+++ b/Tools/ifort.py
@@ -107,7 +107,7 @@ def gather_ifort_versions(conf, versions):
 	"""
 	List compiler versions by looking up registry keys
 	"""
-	version_pattern = re.compile('^...?.?\....?.?')
+	version_pattern = re.compile(r'^...?.?\....?.?')
 	try:
 		all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\Fortran')
 	except OSError:
diff --git a/Tools/javaw.py b/Tools/javaw.py
index f6fd20cc..9daed395 100644
--- a/Tools/javaw.py
+++ b/Tools/javaw.py
@@ -24,12 +24,95 @@ You would have to run::
    java -jar /path/to/jython.jar waf configure
 
 [1] http://www.jython.org/
+
+Usage
+=====
+
+Load the "java" tool.
+
+def configure(conf):
+	conf.load('java')
+
+Java tools will be autodetected and, if present, the standard JAVA_HOME
+environment variable will be used. The standard CLASSPATH environment
+variable is used for library searching.
+
+In configuration phase checks can be done on the system environment, for
+example to check if a class is known in the classpath::
+
+	conf.check_java_class('java.io.FileOutputStream')
+
+or if the system supports JNI applications building::
+
+	conf.check_jni_headers()
+
+
+The java tool supports compiling java code, creating jar files and
+creating javadoc documentation. This can be either done separately or
+together in a single definition. For example to manage them separately::
+
+	bld(features  = 'javac',
+		srcdir    = 'src',
+		compat    = '1.7',
+		use       = 'animals',
+		name      = 'cats-src',
+	)
+
+	bld(features  = 'jar',
+		basedir   = '.',
+		destfile  = '../cats.jar',
+		name      = 'cats',
+		use       = 'cats-src'
+	)
+
+
+Or together by defining all the needed attributes::
+
+	bld(features   = 'javac jar javadoc',
+		srcdir     = 'src/',  # folder containing the sources to compile
+		outdir     = 'src',   # folder where to output the classes (in the build directory)
+		compat     = '1.6',   # java compatibility version number
+		classpath  = ['.', '..'],
+
+		# jar
+		basedir    = 'src', # folder containing the classes and other files to package (must match outdir)
+		destfile   = 'foo.jar', # do not put the destfile in the folder of the java classes!
+		use        = 'NNN',
+		jaropts    = ['-C', 'default/src/', '.'], # can be used to give files
+		manifest   = 'src/Manifest.mf', # Manifest file to include
+
+		# javadoc
+		javadoc_package = ['com.meow' , 'com.meow.truc.bar', 'com.meow.truc.foo'],
+		javadoc_output  = 'javadoc',
+	)
+
+External jar dependencies can be mapped to a standard waf "use" dependency by
+setting an environment variable with a CLASSPATH prefix in the configuration,
+for example::
+
+	conf.env.CLASSPATH_NNN = ['aaaa.jar', 'bbbb.jar']
+
+and then NNN can be freely used in rules as::
+
+	use        = 'NNN',
+
+In the java tool, dependencies expressed via *use* are not transitive by
+default, as whether this is needed depends on the code. To enable recursive
+dependency scanning, set on a specific rule:
+
+		recurse_use = True
+
+Or build-wise by setting RECURSE_JAVA:
+
+		bld.env.RECURSE_JAVA = True
+
+Unit tests can be integrated in the waf unit test environment using the javatest extra.
 """
 
 import os, shutil
 from waflib import Task, Utils, Errors, Node
 from waflib.Configure import conf
-from waflib.TaskGen import feature, before_method, after_method
+from waflib.TaskGen import feature, before_method, after_method, taskgen_method
 
 from waflib.Tools import ccroot
 ccroot.USELIB_VARS['javac'] = set(['CLASSPATH', 'JAVACFLAGS'])
@@ -107,6 +190,32 @@ def apply_java(self):
 	if names:
 		tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names])
 
+
+@taskgen_method
+def java_use_rec(self, name, **kw):
+	"""
+	Processes recursively the *use* attribute for each referred java compilation
+	"""
+	if name in self.tmp_use_seen:
+		return
+
+	self.tmp_use_seen.append(name)
+
+	try:
+		y = self.bld.get_tgen_by_name(name)
+	except Errors.WafError:
+		self.uselib.append(name)
+		return
+	else:
+		y.post()
+		# Add generated JAR name for CLASSPATH. Task ordering (set_run_after)
+		# is already guaranteed by ordering done between the single tasks
+		if hasattr(y, 'jar_task'):
+			self.use_lst.append(y.jar_task.outputs[0].abspath())
+
+	for x in self.to_list(getattr(y, 'use', [])):
+		self.java_use_rec(x)
+
 @feature('javac')
 @before_method('propagate_uselib_vars')
 @after_method('apply_java')
@@ -114,7 +223,8 @@ def use_javac_files(self):
 	"""
 	Processes the *use* attribute referring to other java compilations
 	"""
-	lst = []
+	self.use_lst = []
+	self.tmp_use_seen = []
 	self.uselib = self.to_list(getattr(self, 'uselib', []))
 	names = self.to_list(getattr(self, 'use', []))
 	get = self.bld.get_tgen_by_name
@@ -126,12 +236,17 @@ def use_javac_files(self):
 		else:
 			y.post()
 			if hasattr(y, 'jar_task'):
-				lst.append(y.jar_task.outputs[0].abspath())
+				self.use_lst.append(y.jar_task.outputs[0].abspath())
 				self.javac_task.set_run_after(y.jar_task)
 			else:
 				for tsk in y.tasks:
 					self.javac_task.set_run_after(tsk)
-	self.env.append_value('CLASSPATH', lst)
+
+		# If recurse use scan is enabled recursively add use attribute for each used one
+		if getattr(self, 'recurse_use', False) or self.bld.env.RECURSE_JAVA:
+			self.java_use_rec(x)
+
+	self.env.append_value('CLASSPATH', self.use_lst)
 
 @feature('javac')
 @after_method('apply_java', 'propagate_uselib_vars', 'use_javac_files')
@@ -245,7 +360,7 @@ class jar_create(JTask):
 				return Task.ASK_LATER
 		if not self.inputs:
 			try:
-				self.inputs = [x for x in self.basedir.ant_glob(JAR_RE, remove=False) if id(x) != id(self.outputs[0])]
+				self.inputs = [x for x in self.basedir.ant_glob(JAR_RE, remove=False, quiet=True) if id(x) != id(self.outputs[0])]
 			except Exception:
 				raise Errors.WafError('Could not find the basedir %r for %r' % (self.basedir, self))
 		return super(jar_create, self).runnable_status()
@@ -279,14 +394,14 @@ class javac(JTask):
 			self.inputs  = []
 			for x in self.srcdir:
 				if x.exists():
-					self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False))
+					self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False, quiet=True))
 		return super(javac, self).runnable_status()
 
 	def post_run(self):
 		"""
 		List class files created
 		"""
-		for node in self.generator.outdir.ant_glob('**/*.class'):
+		for node in self.generator.outdir.ant_glob('**/*.class', quiet=True):
 			self.generator.bld.node_sigs[node] = self.uid()
 		self.generator.bld.task_sigs[self.uid()] = self.cache_sig
 
@@ -338,7 +453,7 @@ class javadoc(Task.Task):
 		self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0)
 
 	def post_run(self):
-		nodes = self.generator.javadoc_output.ant_glob('**')
+		nodes = self.generator.javadoc_output.ant_glob('**', quiet=True)
 		for node in nodes:
 			self.generator.bld.node_sigs[node] = self.uid()
 		self.generator.bld.task_sigs[self.uid()] = self.cache_sig
diff --git a/Tools/md5_tstamp.py b/Tools/md5_tstamp.py
index 6428e460..2a587925 100644
--- a/Tools/md5_tstamp.py
+++ b/Tools/md5_tstamp.py
@@ -2,8 +2,7 @@
 # encoding: utf-8
 
 """
-Re-calculate md5 hashes of files only when the file times or the file
-size have changed.
+Re-calculate md5 hashes of files only when the file times have changed.
 
 The hashes can also reflect either the file contents (STRONGEST=True) or the
 file time and file size.
diff --git a/Tools/msvc.py b/Tools/msvc.py
index 17b347d4..ff58449d 100644
--- a/Tools/msvc.py
+++ b/Tools/msvc.py
@@ -281,7 +281,7 @@ def gather_wince_supported_platforms():
 
 def gather_msvc_detected_versions():
 	#Detected MSVC versions!
-	version_pattern = re.compile('^(\d\d?\.\d\d?)(Exp)?$')
+	version_pattern = re.compile(r'^(\d\d?\.\d\d?)(Exp)?$')
 	detected_versions = []
 	for vcver,vcvar in (('VCExpress','Exp'), ('VisualStudio','')):
 		prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\' + vcver
@@ -367,7 +367,7 @@ def gather_wsdk_versions(conf, versions):
 	:param versions: list to modify
 	:type versions: list
 	"""
-	version_pattern = re.compile('^v..?.?\...?.?')
+	version_pattern = re.compile(r'^v..?.?\...?.?')
 	try:
 		all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows')
 	except OSError:
@@ -525,7 +525,7 @@ def gather_icl_versions(conf, versions):
 	:param versions: list to modify
 	:type versions: list
 	"""
-	version_pattern = re.compile('^...?.?\....?.?')
+	version_pattern = re.compile(r'^...?.?\....?.?')
 	try:
 		all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++')
 	except OSError:
@@ -579,7 +579,7 @@ def gather_intel_composer_versions(conf, versions):
 	:param versions: list to modify
 	:type versions: list
 	"""
-	version_pattern = re.compile('^...?.?\...?.?.?')
+	version_pattern = re.compile(r'^...?.?\...?.?.?')
 	try:
 		all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Suites')
 	except OSError:
@@ -683,7 +683,7 @@ def find_lt_names_msvc(self, libname, is_static=False):
 				if not is_static and ltdict.get('library_names', ''):
 					dllnames=ltdict['library_names'].split()
 					dll=dllnames[0].lower()
-					dll=re.sub('\.dll$', '', dll)
+					dll=re.sub(r'\.dll$', '', dll)
 					return (lt_libdir, dll, False)
 				elif ltdict.get('old_library', ''):
 					olib=ltdict['old_library']
@@ -700,7 +700,7 @@ def find_lt_names_msvc(self, libname, is_static=False):
 @conf
 def libname_msvc(self, libname, is_static=False):
 	lib = libname.lower()
-	lib = re.sub('\.lib$','',lib)
+	lib = re.sub(r'\.lib$','',lib)
 
 	if lib in g_msvc_systemlibs:
 		return lib
@@ -747,11 +747,11 @@ def libname_msvc(self, libname, is_static=False):
 		for libn in libnames:
 			if os.path.exists(os.path.join(path, libn)):
 				Logs.debug('msvc: lib found: %s', os.path.join(path,libn))
-				return re.sub('\.lib$', '',libn)
+				return re.sub(r'\.lib$', '',libn)
 
 	#if no lib can be found, just return the libname as msvc expects it
 	self.fatal('The library %r could not be found' % libname)
-	return re.sub('\.lib$', '', libname)
+	return re.sub(r'\.lib$', '', libname)
 
 @conf
 def check_lib_msvc(self, libname, is_static=False, uselib_store=None):
diff --git a/Tools/python.py b/Tools/python.py
index 25841d03..01a2c9aa 100644
--- a/Tools/python.py
+++ b/Tools/python.py
@@ -329,6 +329,10 @@ def check_python_headers(conf, features='pyembed pyext'):
 	conf.find_program([''.join(pybin) + '-config', 'python%s-config' % num, 'python-config-%s' % num, 'python%sm-config' % num], var='PYTHON_CONFIG', msg="python-config", mandatory=False)
 
 	if env.PYTHON_CONFIG:
+		# check python-config output only once
+		if conf.env.HAVE_PYTHON_H:
+			return
+
 		# python2.6-config requires 3 runs
 		all_flags = [['--cflags', '--libs', '--ldflags']]
 		if sys.hexversion < 0x2070000:
diff --git a/Tools/qt5.py b/Tools/qt5.py
index 4f9c6908..9f432801 100644
--- a/Tools/qt5.py
+++ b/Tools/qt5.py
@@ -313,11 +313,11 @@ def apply_qt5(self):
 
 	The additional parameters are:
 
-	:param lang: list of translation files (\*.ts) to process
+	:param lang: list of translation files (\\*.ts) to process
 	:type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension
-	:param update: whether to process the C++ files to update the \*.ts files (use **waf --translate**)
+	:param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**)
 	:type update: bool
-	:param langname: if given, transform the \*.ts files into a .qrc files to include in the binary file
+	:param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file
 	:type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension
 	"""
 	if getattr(self, 'lang', None):
@@ -762,7 +762,7 @@ def set_qt5_libs_to_check(self):
 		if self.environ.get('QT5_FORCE_STATIC'):
 			pat = self.env.cxxstlib_PATTERN
 		if Utils.unversioned_sys_platform() == 'darwin':
-			pat = "%s\.framework"
+			pat = r"%s\.framework"
 		re_qt = re.compile(pat%'Qt5?(?P<name>.*)'+'$')
 		for x in dirlst:
 			m = re_qt.match(x)
diff --git a/Tools/waf_unit_test.py b/Tools/waf_unit_test.py
index a71ed1c0..74d6c056 100644
--- a/Tools/waf_unit_test.py
+++ b/Tools/waf_unit_test.py
@@ -205,7 +205,7 @@ class utest(Task.Task):
 		return self.exec_command(self.ut_exec)
 
 	def exec_command(self, cmd, **kw):
-		Logs.debug('runner: %r', cmd)
+		self.generator.bld.log_command(cmd, kw)
 		if getattr(Options.options, 'dump_test_scripts', False):
 			script_code = SCRIPT_TEMPLATE % {
 				'python': sys.executable,
diff --git a/Tools/winres.py b/Tools/winres.py
index 586c596c..9be1ed66 100644
--- a/Tools/winres.py
+++ b/Tools/winres.py
@@ -24,8 +24,8 @@ def rc_file(self, node):
 		self.compiled_tasks = [rctask]
 
 re_lines = re.compile(
-	'(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|'\
-	'(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)',
+	r'(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|'\
+	r'(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)',
 	re.IGNORECASE | re.MULTILINE)
 
 class rc_parser(c_preproc.c_parser):
diff --git a/Utils.py b/Utils.py
index b4665c4d..4b808a85 100644
--- a/Utils.py
+++ b/Utils.py
@@ -49,10 +49,16 @@ try:
 	from hashlib import md5
 except ImportError:
 	try:
-		from md5 import md5
+		from hashlib import sha1 as md5
 	except ImportError:
-		# never fail to enable fixes from another module
+		# never fail to enable potential fixes from another module
 		pass
+else:
+	try:
+		md5().digest()
+	except ValueError:
+		# Fips? #2213
+		from hashlib import sha1 as md5
 
 try:
 	import threading
@@ -202,7 +208,7 @@ class lazy_generator(object):
 
 	next = __next__
 
-is_win32 = os.sep == '\\' or sys.platform == 'win32' # msys2
+is_win32 = os.sep == '\\' or sys.platform == 'win32' or os.name == 'nt' # msys2
 """
 Whether this system is a Windows series
 """
@@ -484,7 +490,9 @@ def split_path_msys(path):
 if sys.platform == 'cygwin':
 	split_path = split_path_cygwin
 elif is_win32:
-	if os.environ.get('MSYSTEM'):
+	# Consider this an MSYSTEM environment if $MSYSTEM is set and python
+	# reports its executable from a unix-like path on a windows host.
+	if os.environ.get('MSYSTEM') and sys.executable.startswith('/'):
 		split_path = split_path_msys
 	else:
 		split_path = split_path_win32
@@ -730,7 +738,7 @@ def unversioned_sys_platform():
 	if s == 'cli' and os.name == 'nt':
 		# ironpython is only on windows as far as we know
 		return 'win32'
-	return re.split('\d+$', s)[0]
+	return re.split(r'\d+$', s)[0]
 
 def nada(*k, **kw):
 	"""
diff --git a/ansiterm.py b/ansiterm.py
index 0d20c637..027f0ad6 100644
--- a/ansiterm.py
+++ b/ansiterm.py
@@ -264,7 +264,7 @@ else:
 			'u': pop_cursor,
 		}
 		# Match either the escape sequence or text not containing escape sequence
-		ansi_tokens = re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
+		ansi_tokens = re.compile(r'(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
 		def write(self, text):
 			try:
 				wlock.acquire()
diff --git a/extras/autowaf.py b/extras/autowaf.py
index 92d0e57e..51077d18 100644
--- a/extras/autowaf.py
+++ b/extras/autowaf.py
@@ -4,40 +4,46 @@ import subprocess
 import sys
 import time
 
-from waflib import Build, Context, Logs, Options, Utils
+from waflib import Configure, ConfigSet, Build, Context, Logs, Options, Utils
 from waflib.TaskGen import feature, before, after
 
 global g_is_child
 g_is_child = False
 
-# Only run autowaf hooks once (even if sub projects call several times)
-global g_step
-g_step = 0
+NONEMPTY = -10
 
-global line_just
-line_just = 40
+if sys.platform == 'win32':
+    lib_path_name = 'PATH'
+elif sys.platform == 'darwin':
+    lib_path_name = 'DYLD_LIBRARY_PATH'
+else:
+    lib_path_name = 'LD_LIBRARY_PATH'
 
 # Compute dependencies globally
 # import preproc
 # preproc.go_absolute = True
 
-# Test context that inherits build context to make configuration available
-class TestContext(Build.BuildContext):
-    "Run tests"
-    cmd = 'test'
-    fun = 'test'
-
 @feature('c', 'cxx')
 @after('apply_incpaths')
 def include_config_h(self):
     self.env.append_value('INCPATHS', self.bld.bldnode.abspath())
 
-def set_options(opt, debug_by_default=False, test=False):
-    "Add standard autowaf options if they havn't been added yet"
-    global g_step
-    if g_step > 0:
-        return
+class OptionsContext(Options.OptionsContext):
+    def __init__(self, **kwargs):
+        super(OptionsContext, self).__init__(**kwargs)
+        set_options(self)
 
+    def configuration_options(self):
+        return self.get_option_group('Configuration options')
+
+    def add_flags(self, group, flags):
+        """Tersely add flags (a dictionary of longname:desc) to a group"""
+        for name, desc in flags.items():
+            group.add_option('--' + name, action='store_true',
+                             dest=name.replace('-', '_'), help=desc)
+
+def set_options(opt, debug_by_default=False):
+    "Add standard autowaf options"
     opts = opt.get_option_group('Configuration options')
 
     # Standard directory options
@@ -77,7 +83,7 @@ def set_options(opt, debug_by_default=False, test=False):
                     help="build documentation (requires doxygen)")
 
     # Test options
-    if test:
+    if hasattr(Context.g_module, 'test'):
         test_opts = opt.add_option_group('Test options', '')
         opts.add_option('-T', '--test', action='store_true', dest='build_tests',
                         help='build unit tests')
@@ -88,12 +94,41 @@ def set_options(opt, debug_by_default=False, test=False):
                              dest='test_wrapper',
                              help='command prefix for tests (e.g. valgrind)')
 
-    g_step = 1
+    # Run options
+    run_opts = opt.add_option_group('Run options')
+    run_opts.add_option('--cmd', type='string', dest='cmd',
+                        help='command to run from build directory')
+
+class ConfigureContext(Configure.ConfigurationContext):
+    """configures the project"""
 
-def add_flags(opt, flags):
-    for name, desc in flags.items():
-        opt.add_option('--' + name, action='store_true',
-                       dest=name.replace('-', '_'), help=desc)
+    def __init__(self, **kwargs):
+        self.line_just = 45
+        if hasattr(Context.g_module, 'line_just'):
+            self.line_just = Context.g_module.line_just
+
+        super(ConfigureContext, self).__init__(**kwargs)
+        self.run_env = ConfigSet.ConfigSet()
+        self.system_include_paths = set()
+
+    def pre_recurse(self, node):
+        if len(self.stack_path) == 1:
+            Logs.pprint('BOLD', 'Configuring %s' % node.parent.srcpath())
+        super(ConfigureContext, self).pre_recurse(node)
+
+    def store(self):
+        self.env.AUTOWAF_RUN_ENV = self.run_env.get_merged_dict()
+        for path in sorted(self.system_include_paths):
+            if 'COMPILER_CC' in self.env:
+                self.env.append_value('CFLAGS', ['-isystem', path])
+            if 'COMPILER_CXX' in self.env:
+                self.env.append_value('CXXFLAGS', ['-isystem', path])
+
+        super(ConfigureContext, self).store()
+
+    def build_path(self, path='.'):
+        """Return `path` within the build directory"""
+        return str(self.path.get_bld().find_node(path))
 
 def get_check_func(conf, lang):
     if lang == 'c':
@@ -179,14 +214,8 @@ def check_pkg(conf, name, **args):
         conf.env[var_name] = CheckType.OPTIONAL
 
     if not conf.env.MSVC_COMPILER and 'system' in args and args['system']:
-        includes = conf.env['INCLUDES_' + nameify(args['uselib_store'])]
-        for path in includes:
-            if 'COMPILER_CC' in conf.env:
-                conf.env.append_value('CFLAGS', ['-isystem', path])
-            if 'COMPILER_CXX' in conf.env:
-                conf.env.append_value('CXXFLAGS', ['-isystem', path])
-
-        conf.env.append_value('CXXFLAGS', ['-isystem', '/usr/local/include'])
+        conf.system_include_paths.update(
+            conf.env['INCLUDES_' + nameify(args['uselib_store'])])
 
 def normpath(path):
     if sys.platform == 'win32':
@@ -195,10 +224,6 @@ def normpath(path):
         return os.path.normpath(path)
 
 def configure(conf):
-    global g_step
-    if g_step > 1:
-        return
-
     def append_cxx_flags(flags):
         conf.env.append_value('CFLAGS', flags)
         conf.env.append_value('CXXFLAGS', flags)
@@ -344,11 +369,9 @@ def configure(conf):
 
     conf.env.prepend_value('CFLAGS', '-I' + os.path.abspath('.'))
     conf.env.prepend_value('CXXFLAGS', '-I' + os.path.abspath('.'))
-    g_step = 2
 
 def display_summary(conf, msgs=None):
-    global g_is_child
-    if not g_is_child:
+    if len(conf.stack_path) == 1:
         display_msg(conf, "Install prefix", conf.env['PREFIX'])
         if 'COMPILER_CC' in conf.env:
             display_msg(conf, "C Flags", ' '.join(conf.env['CFLAGS']))
@@ -463,25 +486,17 @@ def set_lib_env(conf, name, version):
     major_ver    = version.split('.')[0]
     pkg_var_name = 'PKG_' + name.replace('-', '_') + '_' + major_ver
     lib_name     = '%s-%s' % (name, major_ver)
+    lib_path     = [str(conf.path.get_bld())]
     if conf.env.PARDEBUG:
         lib_name += 'D'
     conf.env[pkg_var_name]       = lib_name
     conf.env['INCLUDES_' + NAME] = ['${INCLUDEDIR}/%s-%s' % (name, major_ver)]
-    conf.env['LIBPATH_' + NAME]  = [conf.env.LIBDIR]
+    conf.env['LIBPATH_' + NAME]  = lib_path
     conf.env['LIB_' + NAME]      = [lib_name]
 
+    conf.run_env.append_unique(lib_path_name, lib_path)
     conf.define(NAME + '_VERSION', version)
 
-def set_line_just(conf, width):
-    global line_just
-    line_just = max(line_just, width)
-    conf.line_just = line_just
-
-def display_header(title):
-    global g_is_child
-    if g_is_child:
-        Logs.pprint('BOLD', title)
-
 def display_msg(conf, msg, status=None, color=None):
     color = 'CYAN'
     if type(status) == bool and status:
@@ -613,7 +628,8 @@ def make_simple_dox(name):
         os.chdir(top)
     except Exception as e:
         Logs.error("Failed to fix up %s documentation: %s" % (name, e))
-
+    finally:
+        os.chdir(top)
 
 def build_dox(bld, name, version, srcdir, blddir, outdir='', versioned=True):
     """Build Doxygen API documentation"""
@@ -744,18 +760,76 @@ def build_i18n(bld, srcdir, dir, name, sources, copyright_holder=None):
     build_i18n_po(bld, srcdir, dir, name, sources, copyright_holder)
     build_i18n_mo(bld, srcdir, dir, name, sources, copyright_holder)
 
-def cd_to_build_dir(ctx, appname):
-    top_level = (len(ctx.stack_path) > 1)
-    if top_level:
-        os.chdir(os.path.join('build', appname))
-    else:
-        os.chdir('build')
+class ExecutionEnvironment:
+    """Context that sets system environment variables for program execution"""
+    def __init__(self, changes):
+        self.original_environ = os.environ.copy()
 
-def cd_to_orig_dir(ctx, child):
-    if child:
-        os.chdir(os.path.join('..', '..'))
-    else:
-        os.chdir('..')
+        self.diff = {}
+        for path_name, paths in changes.items():
+            value = os.pathsep.join(paths)
+            if path_name in os.environ:
+                value += os.pathsep + os.environ[path_name]
+
+            self.diff[path_name] = value
+
+        os.environ.update(self.diff)
+
+    def __str__(self):
+        return '\n'.join({'%s="%s"' % (k, v) for k, v in self.diff.items()})
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        os.environ = self.original_environ
+
+class RunContext(Build.BuildContext):
+    "runs an executable from the build directory"
+    cmd = 'run'
+
+    def execute(self):
+        self.restore()
+        if not self.all_envs:
+            self.load_envs()
+
+        with ExecutionEnvironment(self.env.AUTOWAF_RUN_ENV) as env:
+            if Options.options.verbose:
+                Logs.pprint('GREEN', str(env) + '\n')
+
+            if Options.options.cmd:
+                Logs.pprint('GREEN', 'Running %s' % Options.options.cmd)
+                subprocess.call(Options.options.cmd, shell=True)
+            else:
+                Logs.error("error: Missing --cmd option for run command")
+
+def show_diff(from_lines, to_lines, from_filename, to_filename):
+    import difflib
+    import sys
+
+    for line in difflib.unified_diff(
+            from_lines, to_lines,
+            fromfile=os.path.abspath(from_filename),
+            tofile=os.path.abspath(to_filename)):
+        sys.stderr.write(line)
+
+def test_file_equals(patha, pathb):
+    import filecmp
+    import io
+
+    for path in (patha, pathb):
+        if not os.access(path, os.F_OK):
+            Logs.pprint('RED', 'error: missing file %s' % path)
+            return False
+
+    if filecmp.cmp(patha, pathb, shallow=False):
+        return True
+
+    with io.open(patha, 'rU', encoding='utf-8') as fa:
+        with io.open(pathb, 'rU', encoding='utf-8') as fb:
+            show_diff(fa.readlines(), fb.readlines(), patha, pathb)
+
+    return False
 
 def bench_time():
     if hasattr(time, 'perf_counter'): # Added in Python 3.3
@@ -763,207 +837,308 @@ def bench_time():
     else:
         return time.time()
 
-def pre_test(ctx, appname, dirs=['src']):
-    Logs.pprint('GREEN', '\n[==========] Running %s tests' % appname)
-
-    if not hasattr(ctx, 'autowaf_tests_total'):
-        ctx.autowaf_tests_start_time   = bench_time()
-        ctx.autowaf_tests_total        = 0
-        ctx.autowaf_tests_failed       = 0
-        ctx.autowaf_local_tests_total  = 0
-        ctx.autowaf_local_tests_failed = 0
-        ctx.autowaf_tests              = {}
-
-    ctx.autowaf_tests[appname] = {'total': 0, 'failed': 0}
-
-    cd_to_build_dir(ctx, appname)
-    if not ctx.env.NO_COVERAGE:
-        diropts  = ''
-        for i in dirs:
-            diropts += ' -d ' + i
-        clear_log = open('lcov-clear.log', 'w')
-        try:
-            try:
-                # Clear coverage data
-                subprocess.call(('lcov %s -z' % diropts).split(),
-                                stdout=clear_log, stderr=clear_log)
-            except Exception:
-                Logs.warn('Failed to run lcov, no coverage report generated')
-        finally:
-            clear_log.close()
-
-class TestFailed(Exception):
-    pass
-
-def post_test(ctx, appname, dirs=['src'], remove=['*boost*', 'c++*']):
-    if not ctx.env.NO_COVERAGE:
-        diropts  = ''
-        for i in dirs:
-            diropts += ' -d ' + i
-        coverage_log           = open('lcov-coverage.log', 'w')
-        coverage_lcov          = open('coverage.lcov', 'w')
-        coverage_stripped_lcov = open('coverage-stripped.lcov', 'w')
+class TestOutput:
+    """Test output that is truthy if result is as expected"""
+    def __init__(self, expected, result=None):
+        self.stdout = self.stderr = None
+        self.expected = expected
+        self.result = result
+
+    def __bool__(self):
+        return self.expected is None or self.result == self.expected
+
+    __nonzero__ = __bool__
+
+def is_string(s):
+    if sys.version_info[0] < 3:
+        return isinstance(s, basestring)
+    return isinstance(s, str)
+
+class TestScope:
+    """Scope for running tests that maintains pass/fail statistics"""
+    def __init__(self, tst, name, defaults):
+        self.tst = tst
+        self.name = name
+        self.defaults = defaults
+        self.n_failed = 0
+        self.n_total = 0
+
+    def run(self, test, **kwargs):
+        if callable(test):
+            output = self._run_callable(test, **kwargs)
+        elif type(test) == list:
+            if 'name' not in kwargs:
+                import pipes
+                kwargs['name'] = ' '.join(map(pipes.quote, test))
+
+            output = self._run_command(test, **kwargs)
+        else:
+            raise Exception("Unknown test type")
+
+        if not output:
+            self.tst.log_bad('FAILED', kwargs['name'])
+
+        return self.tst.test_result(output)
+
+    def _run_callable(self, test, **kwargs):
+        expected = kwargs['expected'] if 'expected' in kwargs else True
+        return TestOutput(expected, test())
+
+    def _run_command(self, test, **kwargs):
+        if 'stderr' in kwargs and kwargs['stderr'] == NONEMPTY:
+            # Run with a temp file for stderr and check that it is non-empty
+            import tempfile
+            with tempfile.TemporaryFile(mode='w') as stderr:
+                kwargs['stderr'] = stderr
+                output = self.run(test, **kwargs)
+                return (output if not output else
+                        self.run(
+                            lambda: stderr.tell() > 0,
+                            name=kwargs['name'] + ' error message'))
+
         try:
-            try:
-                base = '.'
-                if g_is_child:
-                    base = '..'
-
-                # Generate coverage data
-                lcov_cmd = 'lcov -c %s -b %s' % (diropts, base)
-                if ctx.env.LLVM_COV:
-                    lcov_cmd += ' --gcov-tool %s' % ctx.env.LLVM_COV[0]
-                subprocess.call(lcov_cmd.split(),
-                                stdout=coverage_lcov, stderr=coverage_log)
-
-                # Strip unwanted stuff
-                subprocess.call(
-                    ['lcov', '--remove', 'coverage.lcov'] + remove,
-                    stdout=coverage_stripped_lcov, stderr=coverage_log)
-
-                # Generate HTML coverage output
-                if not os.path.isdir('coverage'):
-                    os.makedirs('coverage')
-                subprocess.call(
-                    'genhtml -o coverage coverage-stripped.lcov'.split(),
-                    stdout=coverage_log, stderr=coverage_log)
-
-            except Exception:
-                Logs.warn('Failed to run lcov, no coverage report generated')
+            # Run with stdout and stderr set to the appropriate streams
+            out_stream = self._stream('stdout', kwargs)
+            err_stream = self._stream('stderr', kwargs)
+            return self._exec(test, **kwargs)
         finally:
-            coverage_stripped_lcov.close()
-            coverage_lcov.close()
-            coverage_log.close()
-
-    duration = (bench_time() - ctx.autowaf_tests_start_time) * 1000.0
-    total_tests = ctx.autowaf_tests[appname]['total']
-    failed_tests = ctx.autowaf_tests[appname]['failed']
-    passed_tests = total_tests - failed_tests
-    Logs.pprint('GREEN', '\n[==========] %d tests from %s ran (%d ms total)' % (
-        total_tests, appname, duration))
-    if not ctx.env.NO_COVERAGE:
-        Logs.pprint('GREEN', '[----------] Coverage: <file://%s>'
-                    % os.path.abspath('coverage/index.html'))
-
-    Logs.pprint('GREEN', '[  PASSED  ] %d tests' % passed_tests)
-    if failed_tests > 0:
-        Logs.pprint('RED', '[  FAILED  ] %d tests' % failed_tests)
-        raise TestFailed('Tests from %s failed' % appname)
-    Logs.pprint('', '')
-
-    top_level = (len(ctx.stack_path) > 1)
-    if top_level:
-        cd_to_orig_dir(ctx, top_level)
-
-def run_test(ctx,
-             appname,
-             test,
-             desired_status=0,
-             dirs=['src'],
-             name='',
-             header=False,
-             quiet=False):
-    """Run an individual test.
-
-    `test` is either a shell command string, or a list of [name, return status]
-    for displaying tests implemented in the calling Python code.
-    """
-
-    ctx.autowaf_tests_total += 1
-    ctx.autowaf_local_tests_total += 1
-    ctx.autowaf_tests[appname]['total'] += 1
+            out_stream = out_stream.close() if out_stream else None
+            err_stream = err_stream.close() if err_stream else None
+
+    def _stream(self, stream_name, kwargs):
+        s = kwargs[stream_name] if stream_name in kwargs else None
+        if is_string(s):
+            kwargs[stream_name] = open(s, 'wb')
+            return kwargs[stream_name]
+        return None
+
+    def _exec(self,
+              test,
+              expected=0,
+              name='',
+              stdin=None,
+              stdout=None,
+              stderr=None,
+              verbosity=1):
+        def stream(s):
+            return open(s, 'wb') if type(s) == str else s
+
+        if verbosity > 1:
+            self.tst.log_good('RUN     ', name)
 
-    out = (None, None)
-    if type(test) == list:
-        name       = test[0]
-        returncode = test[1]
-    elif callable(test):
-        returncode = test()
-    else:
-        s = test
-        if isinstance(test, type([])):
-            s = ' '.join(test)
-        if header and not quiet:
-            Logs.pprint('Green', '\n[ RUN      ] %s' % s)
-        cmd = test
         if Options.options.test_wrapper:
-            cmd = Options.options.test_wrapper + ' ' + test
-        if name == '':
-            name = test
-
-        proc = subprocess.Popen(cmd, shell=True,
-                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        out = proc.communicate()
-        returncode = proc.returncode
-
-    success = desired_status is None or returncode == desired_status
-    if success:
-        if not quiet:
-            Logs.pprint('GREEN', '[       OK ] %s' % name)
-    else:
-        Logs.pprint('RED', '[  FAILED  ] %s' % name)
-        ctx.autowaf_tests_failed += 1
-        ctx.autowaf_local_tests_failed += 1
-        ctx.autowaf_tests[appname]['failed'] += 1
-        if type(test) != list and not callable(test):
-            Logs.pprint('RED', test)
-
-    if Options.options.verbose and type(test) != list and not callable(test):
-        sys.stdout.write(out[0].decode('utf-8'))
-        sys.stderr.write(out[1].decode('utf-8'))
-
-    return (success, out)
-
-def tests_name(ctx, appname, name='*'):
-    if name == '*':
-        return appname
-    else:
-        return '%s.%s' % (appname, name)
+            test = [Options.options.test_wrapper] + test
 
-def begin_tests(ctx, appname, name='*'):
-    ctx.autowaf_local_tests_failed = 0
-    ctx.autowaf_local_tests_total  = 0
-    ctx.autowaf_local_tests_start_time = bench_time()
-    Logs.pprint('GREEN', '\n[----------] %s' % (
-        tests_name(ctx, appname, name)))
+        output = TestOutput(expected)
+        with open(os.devnull, 'wb') as null:
+            out = null if verbosity < 3 and not stdout else stdout
+            err = null if verbosity < 2 and not stderr else stderr
+            proc = subprocess.Popen(test, stdin=stdin, stdout=out, stderr=err)
+            output.stdout, output.stderr = proc.communicate()
+            output.result = proc.returncode
 
-    class Handle:
-        def __enter__(self):
-            pass
+        if output and verbosity > 0:
+            self.tst.log_good('      OK', name)
 
-        def __exit__(self, type, value, traceback):
-            end_tests(ctx, appname, name)
+        return output
 
-    return Handle()
+class TestContext(Build.BuildContext):
+    "runs test suite"
+    fun = cmd = 'test'
 
-def end_tests(ctx, appname, name='*'):
-    duration = (bench_time() - ctx.autowaf_local_tests_start_time) * 1000.0
-    total = ctx.autowaf_local_tests_total
-    failures = ctx.autowaf_local_tests_failed
-    if failures == 0:
-        Logs.pprint('GREEN', '[----------] %d tests from %s (%d ms total)' % (
-            ctx.autowaf_local_tests_total, tests_name(ctx, appname, name), duration))
-    else:
-        Logs.pprint('RED', '[----------] %d/%d tests from %s (%d ms total)' % (
-            total - failures, total, tests_name(ctx, appname, name), duration))
-
-def run_tests(ctx,
-              appname,
-              tests,
-              desired_status=0,
-              dirs=['src'],
-              name='*',
-              headers=False):
-    begin_tests(ctx, appname, name)
-
-    diropts  = ''
-    for i in dirs:
-        diropts += ' -d ' + i
-
-    for i in tests:
-        run_test(ctx, appname, i, desired_status, dirs, i, headers)
-
-    end_tests(ctx, appname, name)
+    def __init__(self, **kwargs):
+        super(TestContext, self).__init__(**kwargs)
+        self.start_time = bench_time()
+        self.max_depth = 1
+
+        defaults = {'verbosity': Options.options.verbose}
+        self.stack = [TestScope(self, Context.g_module.APPNAME, defaults)]
+
+    def defaults(self):
+        return self.stack[-1].defaults
+
+    def finalize(self):
+        if self.stack[-1].n_failed > 0:
+            sys.exit(1)
+
+        super(TestContext, self).finalize()
+
+    def __call__(self, test, **kwargs):
+        return self.stack[-1].run(test, **self.args(**kwargs))
+
+    def file_equals(self, from_path, to_path, **kwargs):
+        kwargs.update({'expected': True,
+                       'name': '%s == %s' % (from_path, to_path)})
+        return self(lambda: test_file_equals(from_path, to_path), **kwargs)
+
+    def log_good(self, title, fmt, *args):
+        Logs.pprint('GREEN', '[%s] %s' % (title.center(10), fmt % args))
+
+    def log_bad(self, title, fmt, *args):
+        Logs.pprint('RED', '[%s] %s' % (title.center(10), fmt % args))
+
+    def pre_recurse(self, node):
+        wscript_module = Context.load_module(node.abspath())
+        group_name = wscript_module.APPNAME
+        self.stack.append(TestScope(self, group_name, self.defaults()))
+        self.max_depth = max(self.max_depth, len(self.stack) - 1)
+
+        bld_dir = node.get_bld().parent
+        if bld_dir != self.path.get_bld():
+            Logs.info('')
+
+        self.original_dir = os.getcwd()
+        Logs.info("Waf: Entering directory `%s'\n", bld_dir)
+        os.chdir(str(bld_dir))
+
+        if str(node.parent) == Context.top_dir:
+            self.clear_coverage()
+
+        self.log_good('=' * 10, 'Running %s tests', group_name)
+        super(TestContext, self).pre_recurse(node)
+
+    def test_result(self, success):
+        self.stack[-1].n_total += 1
+        self.stack[-1].n_failed += 1 if not success else 0
+        return success
+
+    def pop(self):
+        scope = self.stack.pop()
+        self.stack[-1].n_total += scope.n_total
+        self.stack[-1].n_failed += scope.n_failed
+        return scope
+
+    def post_recurse(self, node):
+        super(TestContext, self).post_recurse(node)
+
+        scope = self.pop()
+        duration = (bench_time() - self.start_time) * 1000.0
+        is_top = str(node.parent) == str(Context.top_dir)
+
+        if is_top and self.max_depth > 1:
+            Logs.info('')
+
+        self.log_good('=' * 10, '%d tests from %s ran (%d ms total)',
+                      scope.n_total, scope.name, duration)
+
+        if not self.env.NO_COVERAGE:
+            if is_top:
+                self.gen_coverage()
+
+            if os.path.exists('coverage/index.html'):
+                self.log_good('COVERAGE', '<file://%s>',
+                              os.path.abspath('coverage/index.html'))
+
+        successes = scope.n_total - scope.n_failed
+        Logs.pprint('GREEN', '[  PASSED  ] %d tests' % successes)
+        if scope.n_failed > 0:
+            Logs.pprint('RED', '[  FAILED  ] %d tests' % scope.n_failed)
+        if is_top:
+            Logs.info("\nWaf: Leaving directory `%s'" % os.getcwd())
+
+        os.chdir(self.original_dir)
+
+    def execute(self):
+        self.restore()
+        if not self.all_envs:
+            self.load_envs()
+
+        if not self.env.BUILD_TESTS:
+            self.fatal('Configuration does not include tests')
+
+        with ExecutionEnvironment(self.env.AUTOWAF_RUN_ENV) as env:
+            if self.defaults()['verbosity'] > 0:
+                Logs.pprint('GREEN', str(env) + '\n')
+            self.recurse([self.run_dir])
+
+    def src_path(self, path):
+        return os.path.relpath(os.path.join(str(self.path), path))
+
+    def args(self, **kwargs):
+        all_kwargs = self.defaults().copy()
+        all_kwargs.update(kwargs)
+        return all_kwargs
+
+    def group(self, name, **kwargs):
+        return TestGroup(
+            self, self.stack[-1].name, name, **self.args(**kwargs))
+
+    def set_test_defaults(self, **kwargs):
+        """Set default arguments to be passed to all tests"""
+        self.stack[-1].defaults.update(kwargs)
+
+    def clear_coverage(self):
+        """Zero old coverage data"""
+        try:
+            with open('cov-clear.log', 'w') as log:
+                subprocess.call(['lcov', '-z', '-d', str(self.path)],
+                                stdout=log, stderr=log)
+
+        except Exception:
+            Logs.warn('Failed to run lcov to clear old coverage data')
+
+    def gen_coverage(self):
+        """Generate coverage data and report"""
+        try:
+            with open('cov.lcov', 'w') as out:
+                with open('cov.log', 'w') as err:
+                    subprocess.call(['lcov', '-c', '--no-external',
+                                     '--rc', 'lcov_branch_coverage=1',
+                                     '-b', '.',
+                                     '-d', str(self.path)],
+                                    stdout=out, stderr=err)
+
+            if not os.path.isdir('coverage'):
+                os.makedirs('coverage')
+
+            with open('genhtml.log', 'w') as log:
+                subprocess.call(['genhtml',
+                                 '-o', 'coverage',
+                                 '--rc', 'genhtml_branch_coverage=1',
+                                 'cov.lcov'],
+                                stdout=log, stderr=log)
+
+        except Exception:
+            Logs.warn('Failed to run lcov to generate coverage report')
+
+class TestGroup:
+    def __init__(self, tst, suitename, name, **kwargs):
+        self.tst = tst
+        self.suitename = suitename
+        self.name = name
+        self.kwargs = kwargs
+        self.start_time = bench_time()
+        tst.stack.append(TestScope(tst, name, tst.defaults()))
+
+    def label(self):
+        return self.suitename + '.%s' % self.name if self.name else ''
+
+    def args(self, **kwargs):
+        all_kwargs = self.tst.args(**self.kwargs)
+        all_kwargs.update(kwargs)
+        return all_kwargs
+
+    def __enter__(self):
+        if 'verbosity' in self.kwargs and self.kwargs['verbosity'] > 0:
+            self.tst.log_good('-' * 10, self.label())
+        return self
+
+    def __call__(self, test, **kwargs):
+        return self.tst(test, **self.args(**kwargs))
+
+    def file_equals(self, from_path, to_path, **kwargs):
+        return self.tst.file_equals(from_path, to_path, **kwargs)
+
+    def __exit__(self, type, value, traceback):
+        duration = (bench_time() - self.start_time) * 1000.0
+        scope = self.tst.pop()
+        n_passed = scope.n_total - scope.n_failed
+        if scope.n_failed == 0:
+            self.tst.log_good('-' * 10, '%d tests from %s (%d ms total)',
+                              scope.n_total, self.label(), duration)
+        else:
+            self.tst.log_bad('-' * 10, '%d/%d tests from %s (%d ms total)',
+                             n_passed, scope.n_total, self.label(), duration)
 
 def run_ldconfig(ctx):
     should_run = (ctx.cmd == 'install' and
diff --git a/extras/buildcopy.py b/extras/buildcopy.py
index a6d9ac83..eaff7e60 100644
--- a/extras/buildcopy.py
+++ b/extras/buildcopy.py
@@ -22,7 +22,7 @@ Examples::
 
 """
 import os, shutil
-from waflib import Errors, Task, TaskGen, Utils, Node
+from waflib import Errors, Task, TaskGen, Utils, Node, Logs
 
 @TaskGen.before_method('process_source')
 @TaskGen.feature('buildcopy')
@@ -58,10 +58,13 @@ def make_buildcopy(self):
 		raise Errors.WafError('buildcopy: File not found in src: %s'%os.path.join(*lst))
 
 	nodes = [ to_src_nodes(n) for n in getattr(self, 'buildcopy_source', getattr(self, 'source', [])) ]
+	if not nodes:
+		Logs.warn('buildcopy: No source files provided to buildcopy in %s (set `buildcopy_source` or `source`)',
+			self)
+		return
 	node_pairs = [(n, n.get_bld()) for n in nodes]
 	self.create_task('buildcopy', [n[0] for n in node_pairs], [n[1] for n in node_pairs], node_pairs=node_pairs)
 
-
 class buildcopy(Task.Task):
 	"""
 	Copy for each pair `n` in `node_pairs`: n[0] -> n[1].
diff --git a/extras/cpplint.py b/extras/cpplint.py
index e3302e5b..8cdd6dda 100644
--- a/extras/cpplint.py
+++ b/extras/cpplint.py
@@ -43,12 +43,12 @@ from waflib import Errors, Task, TaskGen, Logs, Options, Node, Utils
 
 critical_errors = 0
 CPPLINT_FORMAT = '[CPPLINT] %(filename)s:\nline %(linenum)s, severity %(confidence)s, category: %(category)s\n%(message)s\n'
-RE_EMACS = re.compile('(?P<filename>.*):(?P<linenum>\d+):  (?P<message>.*)  \[(?P<category>.*)\] \[(?P<confidence>\d+)\]')
+RE_EMACS = re.compile(r'(?P<filename>.*):(?P<linenum>\d+):  (?P<message>.*)  \[(?P<category>.*)\] \[(?P<confidence>\d+)\]')
 CPPLINT_RE = {
     'waf': RE_EMACS,
     'emacs': RE_EMACS,
-    'vs7': re.compile('(?P<filename>.*)\((?P<linenum>\d+)\):  (?P<message>.*)  \[(?P<category>.*)\] \[(?P<confidence>\d+)\]'),
-    'eclipse': re.compile('(?P<filename>.*):(?P<linenum>\d+): warning: (?P<message>.*)  \[(?P<category>.*)\] \[(?P<confidence>\d+)\]'),
+    'vs7': re.compile(r'(?P<filename>.*)\((?P<linenum>\d+)\):  (?P<message>.*)  \[(?P<category>.*)\] \[(?P<confidence>\d+)\]'),
+    'eclipse': re.compile(r'(?P<filename>.*):(?P<linenum>\d+): warning: (?P<message>.*)  \[(?P<category>.*)\] \[(?P<confidence>\d+)\]'),
 }
 CPPLINT_STR = ('${CPPLINT} '
                '--verbose=${CPPLINT_LEVEL} '
diff --git a/extras/cython.py b/extras/cython.py
index 481d6f4c..591c274d 100644
--- a/extras/cython.py
+++ b/extras/cython.py
@@ -8,8 +8,9 @@ from waflib.TaskGen import extension
 
 cy_api_pat = re.compile(r'\s*?cdef\s*?(public|api)\w*')
 re_cyt = re.compile(r"""
-	(?:from\s+(\w+)\s+)?   # optionally match "from foo" and capture foo
-	c?import\s(\w+|[*])    # require "import bar" and capture bar
+	^\s*                           # must begin with some whitespace characters
+	(?:from\s+(\w+)(?:\.\w+)*\s+)? # optionally match "from foo(.baz)" and capture foo
+	c?import\s(\w+|[*])            # require "import bar" and capture bar
 	""", re.M | re.VERBOSE)
 
 @extension('.pyx')
@@ -85,12 +86,12 @@ class cython(Task.Task):
 		node = self.inputs[0]
 		txt = node.read()
 
-		mods = []
+		mods = set()
 		for m in re_cyt.finditer(txt):
 			if m.group(1):  # matches "from foo import bar"
-				mods.append(m.group(1))
+				mods.add(m.group(1))
 			else:
-				mods.append(m.group(2))
+				mods.add(m.group(2))
 
 		Logs.debug('cython: mods %r', mods)
 		incs = getattr(self.generator, 'cython_includes', [])
@@ -99,7 +100,7 @@ class cython(Task.Task):
 
 		found = []
 		missing = []
-		for x in mods:
+		for x in sorted(mods):
 			for y in incs:
 				k = y.find_resource(x + '.pxd')
 				if k:
diff --git a/extras/distnet.py b/extras/distnet.py
index 09a31a6d..ff3ed8e1 100644
--- a/extras/distnet.py
+++ b/extras/distnet.py
@@ -44,7 +44,7 @@ TARFORMAT = 'w:bz2'
 TIMEOUT = 60
 REQUIRES = 'requires.txt'
 
-re_com = re.compile('\s*#.*', re.M)
+re_com = re.compile(r'\s*#.*', re.M)
 
 def total_version_order(num):
 	lst = num.split('.')
diff --git a/extras/erlang.py b/extras/erlang.py
index 49f6d5b4..0b93d9a4 100644
--- a/extras/erlang.py
+++ b/extras/erlang.py
@@ -51,7 +51,7 @@ class erl(Task.Task):
 			if n.abspath() in scanned:
 				continue
 
-			for i in re.findall('-include\("(.*)"\)\.', n.read()):
+			for i in re.findall(r'-include\("(.*)"\)\.', n.read()):
 				for d in task.erlc_incnodes:
 					r = d.find_node(i)
 					if r:
diff --git a/extras/fast_partial.py b/extras/fast_partial.py
index b3af513b..d5b61448 100644
--- a/extras/fast_partial.py
+++ b/extras/fast_partial.py
@@ -17,7 +17,7 @@ Usage::
 	def options(opt):
 		opt.load('fast_partial')
 
-Assuptions:
+Assumptions:
 * Mostly for C/C++/Fortran targets with link tasks (object-only targets are not handled)
 * For full project builds: no --targets and no pruning from subfolders
 * The installation phase is ignored
diff --git a/extras/fc_nfort.py b/extras/fc_nfort.py
new file mode 100644
index 00000000..c25886b8
--- /dev/null
+++ b/extras/fc_nfort.py
@@ -0,0 +1,52 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# Detection of the NEC Fortran compiler for Aurora Tsubasa
+
+import re
+from waflib.Tools import fc,fc_config,fc_scan
+from waflib.Configure import conf
+from waflib.Tools.compiler_fc import fc_compiler
+fc_compiler['linux'].append('fc_nfort')
+
+@conf
+def find_nfort(conf):
+	fc=conf.find_program(['nfort'],var='FC')
+	conf.get_nfort_version(fc)
+	conf.env.FC_NAME='NFORT'
+	conf.env.FC_MOD_CAPITALIZATION='lower'
+
+@conf
+def nfort_flags(conf):
+	v=conf.env
+	v['_FCMODOUTFLAGS']=[]
+	v['FCFLAGS_DEBUG']=[]
+	v['FCFLAGS_fcshlib']=[]
+	v['LINKFLAGS_fcshlib']=[]
+	v['FCSTLIB_MARKER']=''
+	v['FCSHLIB_MARKER']=''
+
+@conf
+def get_nfort_version(conf,fc):
+	version_re=re.compile(r"nfort\s*\(NFORT\)\s*(?P<major>\d+)\.(?P<minor>\d+)\.",re.I).search
+	cmd=fc+['--version']
+	out,err=fc_config.getoutput(conf,cmd,stdin=False)
+	if out:
+		match=version_re(out)
+	else:
+		match=version_re(err)
+	if not match:
+		conf.fatal('Could not determine the NEC NFORT Fortran compiler version.')
+		return(False)  # not reached: conf.fatal() raises ConfigurationError
+	else:
+		k=match.groupdict()
+		conf.env['FC_VERSION']=(k['major'],k['minor'])
+
+def configure(conf):
+	conf.find_nfort()
+	conf.find_program('nar',var='AR')
+	conf.add_os_flags('ARFLAGS')
+	if not conf.env.ARFLAGS:
+		conf.env.ARFLAGS=['rcs']
+	conf.fc_flags()
+	conf.fc_add_flags()
+	conf.nfort_flags()
diff --git a/extras/gccdeps.py b/extras/gccdeps.py
index d9758ab3..bfabe72e 100644
--- a/extras/gccdeps.py
+++ b/extras/gccdeps.py
@@ -36,7 +36,7 @@ def scan(self):
 	names = []
 	return (nodes, names)
 
-re_o = re.compile("\.o$")
+re_o = re.compile(r"\.o$")
 re_splitter = re.compile(r'(?<!\\)\s+') # split by space, except when spaces are escaped
 
 def remove_makefile_rule_lhs(line):
@@ -197,7 +197,7 @@ def configure(conf):
 		except Errors.ConfigurationError:
 			pass
 		else:
-			conf.env.append_value('CFLAGS', gccdeps_flags)
+			conf.env.append_value('CFLAGS', flags)
 			conf.env.append_unique('ENABLE_GCCDEPS', 'c')
 
 	if conf.env.CXX_NAME in supported_compilers:
@@ -206,7 +206,7 @@ def configure(conf):
 		except Errors.ConfigurationError:
 			pass
 		else:
-			conf.env.append_value('CXXFLAGS', gccdeps_flags)
+			conf.env.append_value('CXXFLAGS', flags)
 			conf.env.append_unique('ENABLE_GCCDEPS', 'cxx')
 
 def options(opt):
diff --git a/extras/kde4.py b/extras/kde4.py
index e49a9ec0..aed9bfb5 100644
--- a/extras/kde4.py
+++ b/extras/kde4.py
@@ -71,7 +71,7 @@ def configure(self):
 	fu = re.compile('#(.*)\n')
 	txt = fu.sub('', txt)
 
-	setregexp = re.compile('([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
+	setregexp = re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)')
 	found = setregexp.findall(txt)
 
 	for (_, key, val) in found:
diff --git a/extras/lv2.py b/extras/lv2.py
index 815987fc..ffcb2e77 100644
--- a/extras/lv2.py
+++ b/extras/lv2.py
@@ -10,6 +10,33 @@ def options(opt):
                          help='install LV2 bundles to user location')
     conf_opts.add_option('--lv2dir', type='string',
                          help='LV2 bundles [Default: LIBDIR/lv2]')
+
+def register_lv2_path(conf, path):
+    """Register an additional path in the runtime environment's LV2_PATH"""
+    if 'LV2_PATH' not in conf.run_env and 'LV2_PATH' not in os.environ:
+        conf.run_env['LV2_PATH'] = [conf.env['LV2DIR']]
+
+    conf.run_env.append_unique('LV2_PATH', path)
+
+def default_lv2_path(conf):
+    """Return the default LV2_PATH for the build target as a list"""
+    if conf.env.DEST_OS == 'darwin':
+        return ['~/Library/Audio/Plug-Ins/LV2',
+                '~/.lv2',
+                '/usr/local/lib/lv2',
+                '/usr/lib/lv2',
+                '/Library/Audio/Plug-Ins/LV2']
+    elif conf.env.DEST_OS == 'haiku':
+        return ['~/.lv2',
+                '/boot/common/add-ons/lv2']
+    elif conf.env.DEST_OS == 'win32':
+        return ['%APPDATA%\\\\LV2',
+                '%COMMONPROGRAMFILES%\\\\LV2']
+    else:
+        libdirname = os.path.basename(conf.env.LIBDIR)
+        return ['~/.lv2',
+                '/usr/%s/lv2' % libdirname,
+                '/usr/local/%s/lv2' % libdirname]
     
 def configure(conf):
     def env_path(parent_dir_var, name):
@@ -43,5 +70,6 @@ def configure(conf):
         else:
             conf.env['LV2DIR'] = os.path.join(conf.env['LIBDIR'], 'lv2')
 
-    conf.env['LV2DIR'] = normpath(conf.env['LV2DIR'])
-
+    # Add default LV2_PATH to runtime environment for tests that use plugins
+    if 'LV2_PATH' not in os.environ:
+        conf.run_env['LV2_PATH'] = default_lv2_path(conf)
diff --git a/extras/ocaml.py b/extras/ocaml.py
index afe73c0c..7d785c6f 100644
--- a/extras/ocaml.py
+++ b/extras/ocaml.py
@@ -15,7 +15,7 @@ EXT_MLI = ['.mli']
 EXT_MLC = ['.c']
 EXT_ML  = ['.ml']
 
-open_re = re.compile('^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M)
+open_re = re.compile(r'^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M)
 foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M)
 def filter_comments(txt):
 	meh = [0]
diff --git a/extras/parallel_debug.py b/extras/parallel_debug.py
index 35883a3d..4ffec5e5 100644
--- a/extras/parallel_debug.py
+++ b/extras/parallel_debug.py
@@ -3,13 +3,16 @@
 # Thomas Nagy, 2007-2010 (ita)
 
 """
-Debugging helper for parallel compilation, outputs
-a file named pdebug.svg in the source directory::
+Debugging helper for parallel compilation.
+
+Copy it to your project and load it with::
 
 	def options(opt):
-		opt.load('parallel_debug')
+		opt.load('parallel_debug', tooldir='.')
 	def build(bld):
 		...
+
+The build will then output a file named pdebug.svg in the source directory.
 """
 
 import re, sys, threading, time, traceback
diff --git a/extras/pgicc.py b/extras/pgicc.py
index 9790b9cf..f8068d53 100644
--- a/extras/pgicc.py
+++ b/extras/pgicc.py
@@ -60,7 +60,7 @@ def get_pgi_version(conf, cc):
 	except Errors.WafError:
 		conf.fatal('Could not find pgi compiler %r' % cmd)
 
-	version = re.findall('^COMPVER\s*=(.*)', out, re.M)
+	version = re.findall(r'^COMPVER\s*=(.*)', out, re.M)
 	if len(version) != 1:
 		conf.fatal('Could not determine the compiler version')
 	return version[0]
diff --git a/extras/protoc.py b/extras/protoc.py
index f3cb4d86..839c510b 100644
--- a/extras/protoc.py
+++ b/extras/protoc.py
@@ -6,7 +6,7 @@
 import re, os
 from waflib.Task import Task
 from waflib.TaskGen import extension
-from waflib import Errors, Context
+from waflib import Errors, Context, Logs
 
 """
 A simple tool to integrate protocol buffers into your build system.
@@ -67,6 +67,13 @@ Example for Java:
                 protoc_includes = ['inc']) # for protoc to search dependencies
 
 
+Protoc includes passed via protoc_includes are either relative to the taskgen
+or to the project and are searched in this order.
+
+Include directories external to the waf project can also be passed to the
+extra by using protoc_extincludes
+
+                protoc_extincludes = ['/usr/include/pblib']
 
 
 Notes when using this tool:
@@ -82,7 +89,7 @@ Notes when using this tool:
 """
 
 class protoc(Task):
-	run_str = '${PROTOC} ${PROTOC_FL:PROTOC_FLAGS} ${PROTOC_ST:INCPATHS} ${PROTOC_ST:PROTOC_INCPATHS} ${SRC[0].bldpath()}'
+	run_str = '${PROTOC} ${PROTOC_FL:PROTOC_FLAGS} ${PROTOC_ST:INCPATHS} ${PROTOC_ST:PROTOC_INCPATHS} ${PROTOC_ST:PROTOC_EXTINCPATHS} ${SRC[0].bldpath()}'
 	color   = 'BLUE'
 	ext_out = ['.h', 'pb.cc', '.py', '.java']
 	def scan(self):
@@ -104,7 +111,17 @@ class protoc(Task):
 
 		if 'py' in self.generator.features or 'javac' in self.generator.features:
 			for incpath in getattr(self.generator, 'protoc_includes', []):
-				search_nodes.append(self.generator.bld.path.find_node(incpath))
+				incpath_node = self.generator.path.find_node(incpath)
+				if incpath_node:
+					search_nodes.append(incpath_node)
+				else:
+					# Check if relative to top-level for extra tg dependencies
+					incpath_node = self.generator.bld.path.find_node(incpath)
+					if incpath_node:
+						search_nodes.append(incpath_node)
+					else:
+						raise Errors.WafError('protoc: include path %r does not exist' % incpath)
+
 
 		def parse_node(node):
 			if node in seen:
@@ -126,7 +143,7 @@ class protoc(Task):
 		parse_node(node)
 		# Add also dependencies path to INCPATHS so protoc will find the included file
 		for deppath in nodes:
-			self.env.append_value('INCPATHS', deppath.parent.bldpath())
+			self.env.append_unique('INCPATHS', deppath.parent.bldpath())
 		return (nodes, names)
 
 @extension('.proto')
@@ -153,61 +170,11 @@ def process_protoc(self, node):
 		protoc_flags.append('--python_out=%s' % node.parent.get_bld().bldpath())
 
 	if 'javac' in self.features:
-		pkgname, javapkg, javacn, nodename = None, None, None, None
-		messages = []
-
-		# .java file name is done with some rules depending on .proto file content:
-		#   -) package is either derived from option java_package if present
-		#      or from package directive
-		#   -) file name is either derived from option java_outer_classname if present
-		#      or the .proto file is converted to camelcase. If a message
-		#      is named the same then the behaviour depends on protoc version
-		#
-		# See also: https://developers.google.com/protocol-buffers/docs/reference/java-generated#invocation
-
-		code = node.read().splitlines()
-		for line in code:
-			m = re.search(r'^package\s+(.*);', line)
-			if m:
-				pkgname = m.groups()[0]
-			m = re.search(r'^option\s+(\S*)\s*=\s*"(\S*)";', line)
-			if m:
-				optname = m.groups()[0]
-				if optname == 'java_package':
-					javapkg = m.groups()[1]
-				elif optname == 'java_outer_classname':
-					javacn = m.groups()[1]
-			if self.env.PROTOC_MAJOR > '2':
-				m = re.search(r'^message\s+(\w*)\s*{*', line)
-				if m:
-					messages.append(m.groups()[0])
-
-		if javapkg:
-			nodename = javapkg
-		elif pkgname:
-			nodename = pkgname
-		else:
-			raise Errors.WafError('Cannot derive java name from protoc file')
-
-		nodename = nodename.replace('.',os.sep) + os.sep
-		if javacn:
-			nodename += javacn + '.java'
-		else:
-			if self.env.PROTOC_MAJOR > '2' and node.abspath()[node.abspath().rfind(os.sep)+1:node.abspath().rfind('.')].title() in messages:
-				nodename += node.abspath()[node.abspath().rfind(os.sep)+1:node.abspath().rfind('.')].title().replace('_','') + 'OuterClass.java'
-			else:
-				nodename += node.abspath()[node.abspath().rfind(os.sep)+1:node.abspath().rfind('.')].title().replace('_','') + '.java'
-
-		java_node = node.parent.find_or_declare(nodename)
-		out_nodes.append(java_node)
-		protoc_flags.append('--java_out=%s' % node.parent.get_bld().bldpath())
-
 		# Make javac get also pick java code generated in build
 		if not node.parent.get_bld() in self.javac_task.srcdir:
 			self.javac_task.srcdir.append(node.parent.get_bld())
 
-	if not out_nodes:
-		raise Errors.WafError('Feature %r not supported by protoc extra' % self.features)
+		protoc_flags.append('--java_out=%s' % node.parent.get_bld().bldpath())
 
 	tsk = self.create_task('protoc', node, out_nodes)
 	tsk.env.append_value('PROTOC_FLAGS', protoc_flags)
@@ -219,9 +186,22 @@ def process_protoc(self, node):
 	# For C++ standard include files dirs are used,
 	# but this doesn't apply to Python for example
 	for incpath in getattr(self, 'protoc_includes', []):
-		incdirs.append(self.path.find_node(incpath).bldpath())
+		incpath_node = self.path.find_node(incpath)
+		if incpath_node:
+			incdirs.append(incpath_node.bldpath())
+		else:
+			# Check if relative to top-level for extra tg dependencies
+			incpath_node = self.bld.path.find_node(incpath)
+			if incpath_node:
+				incdirs.append(incpath_node.bldpath())
+			else:
+				raise Errors.WafError('protoc: include path %r does not exist' % incpath)
+
 	tsk.env.PROTOC_INCPATHS = incdirs
 
+	# Include paths external to the waf project (ie. shared pb repositories)
+	tsk.env.PROTOC_EXTINCPATHS = getattr(self, 'protoc_extincludes', [])
+
 	# PR2115: protoc generates output of .proto files in nested
 	# directories  by canonicalizing paths. To avoid this we have to pass
 	# as first include the full directory file of the .proto file
diff --git a/extras/pyqt5.py b/extras/pyqt5.py
index c21dfa72..80f43b88 100644
--- a/extras/pyqt5.py
+++ b/extras/pyqt5.py
@@ -111,9 +111,9 @@ def apply_pyqt5(self):
 	"""
 	The additional parameters are:
 
-	:param lang: list of translation files (\*.ts) to process
+	:param lang: list of translation files (\\*.ts) to process
 	:type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension
-	:param langname: if given, transform the \*.ts files into a .qrc files to include in the binary file
+	:param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file
 	:type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension
 	"""
 	if getattr(self, 'lang', None):
diff --git a/extras/qt4.py b/extras/qt4.py
index 90cae7e0..d19a4dda 100644
--- a/extras/qt4.py
+++ b/extras/qt4.py
@@ -290,11 +290,11 @@ def apply_qt4(self):
 
 	The additional parameters are:
 
-	:param lang: list of translation files (\*.ts) to process
+	:param lang: list of translation files (\\*.ts) to process
 	:type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension
-	:param update: whether to process the C++ files to update the \*.ts files (use **waf --translate**)
+	:param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**)
 	:type update: bool
-	:param langname: if given, transform the \*.ts files into a .qrc files to include in the binary file
+	:param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file
 	:type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension
 	"""
 	if getattr(self, 'lang', None):
diff --git a/extras/remote.py b/extras/remote.py
index 3b038f77..f43b600f 100644
--- a/extras/remote.py
+++ b/extras/remote.py
@@ -203,7 +203,7 @@ class remote(BuildContext):
 					Options.commands.remove(k)
 
 	def login_to_host(self, login):
-		return re.sub('(\w+@)', '', login)
+		return re.sub(r'(\w+@)', '', login)
 
 	def variant_to_login(self, variant):
 		"""linux_32_debug -> search env.LINUX_32 and then env.LINUX"""
diff --git a/extras/run_do_script.py b/extras/run_do_script.py
index f3c58122..07e3aa25 100644
--- a/extras/run_do_script.py
+++ b/extras/run_do_script.py
@@ -101,7 +101,7 @@ class run_do_script(run_do_script_base):
 		with open(**kwargs) as log:
 			log_tail = log.readlines()[-10:]
 			for line in log_tail:
-				error_found = re.match("r\(([0-9]+)\)", line)
+				error_found = re.match(r"r\(([0-9]+)\)", line)
 				if error_found:
 					return error_found.group(1), ''.join(log_tail)
 				else:
diff --git a/extras/swig.py b/extras/swig.py
index fd3d6d2c..740ab46d 100644
--- a/extras/swig.py
+++ b/extras/swig.py
@@ -17,10 +17,10 @@ tasks have to be added dynamically:
 
 SWIG_EXTS = ['.swig', '.i']
 
-re_module = re.compile('%module(?:\s*\(.*\))?\s+(.+)', re.M)
+re_module = re.compile(r'%module(?:\s*\(.*\))?\s+(.+)', re.M)
 
 re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M)
-re_2 = re.compile('[#%]include [<"](.*)[">]', re.M)
+re_2 = re.compile(r'[#%](?:include|import(?:\(module=".*"\))+|python(?:begin|code)) [<"](.*)[">]', re.M)
 
 class swig(Task.Task):
 	color   = 'BLUE'
-- 
cgit v1.2.1