author    David Robillard <d@drobilla.net>  2021-03-08 23:25:35 -0500
committer David Robillard <d@drobilla.net>  2021-03-09 01:43:52 -0500
commit    7b954f5667e82de1b64984a9aeb26b8ebb5cab81 (patch)
tree      5668f80ce2dc7a52cf66bbe2f4e4429b18f09e08 /test/run_validation_test_suite.py
parent    c579186c5dd4e11bffddd353cef8978a66ef9c10 (diff)
WIP: Validation (serd1-meson)
Diffstat (limited to 'test/run_validation_test_suite.py')
-rwxr-xr-x  test/run_validation_test_suite.py  200
1 file changed, 91 insertions(+), 109 deletions(-)
diff --git a/test/run_validation_test_suite.py b/test/run_validation_test_suite.py
index a27e55de..34a213f0 100755
--- a/test/run_validation_test_suite.py
+++ b/test/run_validation_test_suite.py
@@ -2,12 +2,7 @@
"""Run the serd RDF validation test suite."""
-import serd_test_util
-
import argparse
-import datetime
-import difflib
-import itertools
import os
import re
import shlex
@@ -16,11 +11,24 @@ import sys
import tempfile
import urllib.parse
+import serd_test_util
+
+NS_CHECKS = "http://drobilla.net/ns/serd/checks#"
+NS_MF = "http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#"
+NS_SERD = "http://drobilla.net/ns/serd#"
+
+
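The NS_CHECKS prefix is sliced off later in run_tests to recover bare check names for serdi's -V option; a quick sketch of that slicing (the check name "literalRange" is hypothetical):

    check = NS_CHECKS + "literalRange"
    name = check[len(NS_CHECKS):]  # -> "literalRange"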
+def log_error(message):
+ """Log an error message to stderr"""
+
+ sys.stderr.write("error: ")
+ sys.stderr.write(message)
+
def _uri_path(uri):
path = urllib.parse.urlparse(uri).path
drive = os.path.splitdrive(path[1:])[0]
- return path if not drive else path[1:]
+ return os.path.realpath(path) if not drive else path[1:]
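A quick sketch of what the revised _uri_path returns (the URIs are illustrative; the POSIX result assumes no symlinks along the path):

    # _uri_path("file:///C:/tests/x.ttl")  ->  "C:/tests/x.ttl"  (drive detected)
    # _uri_path("file:///home/me/x.ttl")   ->  "/home/me/x.ttl"  (via os.path.realpath)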
def _load_rdf(filename, base_uri, command_prefix):
@@ -54,57 +62,55 @@ def _load_rdf(filename, base_uri, command_prefix):
return model, instances
-def _option_combinations(options):
- """Return an iterator that cycles through all combinations of options."""
-
- combinations = []
- for count in range(len(options) + 1):
- combinations += list(itertools.combinations(options, count))
-
- return itertools.cycle(combinations)
+def _run_positive_test(command, out_filename):
+ command_string = " ".join([shlex.quote(c) for c in command])
+ with open(out_filename, "w") as stdout:
+ proc = subprocess.run(command, check=False, stdout=stdout)
+ if proc.returncode != 0:
+ log_error("Unexpected command failure failure\n")
+ sys.stderr.write(command_string + "\n")
+ return 1
-def _show_diff(from_lines, to_lines, from_filename, to_filename):
- same = True
- for line in difflib.unified_diff(
- from_lines,
- to_lines,
- fromfile=os.path.abspath(from_filename),
- tofile=os.path.abspath(to_filename),
- ):
- sys.stderr.write(line)
- same = False
+ return proc.returncode
- return same
+ return 1
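The helper returns 0 when the command succeeds and 1 otherwise, so callers can accumulate failures by addition; a hypothetical call (the command and file names are placeholders):

    status = _run_positive_test(["serdi", "-V", "all", "test.ttl"], "test.ttl.out")
    results.n_failures += status  # 0 on success, 1 on failure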
-def _file_equals(patha, pathb):
+def _run_negative_test(command, check_name, out_filename):
+ command_string = " ".join([shlex.quote(c) for c in command])
- for path in (patha, pathb):
- if not os.access(path, os.F_OK):
- sys.stderr.write("error: missing file {}\n".format(path))
- return False
-
- with open(patha, "r", encoding="utf-8") as fa:
- with open(pathb, "r", encoding="utf-8") as fb:
- return _show_diff(fa.readlines(), fb.readlines(), patha, pathb)
+ with open(out_filename, "w") as stdout:
+ with tempfile.TemporaryFile() as stderr:
+ proc = subprocess.run(
+ command, check=False, stdout=stdout, stderr=stderr
+ )
+ # Check that serdi returned with status SERD_ERR_INVALID
+ if proc.returncode != 16:
+ log_error("Unexpected status {}\n".format(proc.returncode))
+ sys.stderr.write(command_string + "\n")
+ return 1
-def _file_lines_equal(patha, pathb, subst_from="", subst_to=""):
- import io
+ # Check that an error message was printed
+ stderr.seek(0, 2) # Seek to end
+ if stderr.tell() == 0: # Empty
+ log_error("No error message printed\n")
+ sys.stderr.write(command_string + "\n")
+ return 1
- for path in (patha, pathb):
- if not os.access(path, os.F_OK):
- sys.stderr.write("error: missing file %s\n" % path)
- return False
+ # Check that the expected check printed an error message
+ stderr.seek(0) # Seek to start
+ err_output = stderr.read().decode("utf-8")
+ if check_name and "[{}]".format(check_name) not in err_output:
+ log_error("Test didn't trigger {}\n".format(check_name))
+ sys.stderr.write(command_string + "\n")
+ sys.stderr.write(err_output + "\n")
+ return 1
- la = sorted(set(io.open(patha, encoding="utf-8").readlines()))
- lb = sorted(set(io.open(pathb, encoding="utf-8").readlines()))
- if la != lb:
- _show_diff(la, lb, patha, pathb)
- return False
+ return 0
- return True
+ return 1
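The empty-stderr check above avoids reading the stream: seeking to the end of a fresh TemporaryFile and finding tell() still at 0 means nothing was written. A standalone sketch of the same idiom, with "false" as a placeholder command that merely exits non-zero:

    import subprocess
    import tempfile

    with tempfile.TemporaryFile() as stderr:
        subprocess.run(["false"], check=False, stderr=stderr)
        stderr.seek(0, 2)        # Seek to end without reading
        if stderr.tell() == 0:   # Still at position 0, so nothing was written
            print("command printed nothing on stderr")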
def validation_test_suite(
@@ -112,12 +118,10 @@ def validation_test_suite(
schemas,
base_uri,
report_filename,
- isyntax,
command_prefix,
):
"""Run all tests in a test suite manifest."""
- mf = "http://www.w3.org/2001/sw/DataAccess/tests/test-manifest#"
test_dir = os.path.dirname(manifest_path)
model, instances = serd_test_util.load_rdf(
manifest_path, base_uri, command_prefix
@@ -133,71 +137,53 @@ def validation_test_suite(
asserter = "http://drobilla.net/drobilla#me"
class Results:
+ """Aggregated count of all tests and results."""
def __init__(self):
self.n_tests = 0
self.n_failures = 0
- def run_tests(test_class, tests, expected_return, results):
+ def run_tests(tests, expected_return, results):
for test in sorted(tests):
- test_uri = model[test][mf + "action"][0]
+ test_uri = model[test][NS_MF + "action"][0]
test_uri_path = _uri_path(test_uri)
test_name = os.path.basename(test_uri_path)
test_path = os.path.join(test_dir, test_name)
-
- command = (
- command_prefix
- + [
- "-V",
- "-I",
- test_uri,
- test_path,
- ]
- + schemas
- )
out_filename = os.path.join(out_test_dir, test_name + ".out")
results.n_tests += 1
if expected_return == 0: # Positive test
+ options = ["-V", "all", "-I", test_uri]
+ command = command_prefix + options + schemas + [test_path]
- with open(out_filename, "w") as stdout:
- proc = subprocess.run(command, check=False, stdout=stdout)
- if proc.returncode == 0:
- passed = True
- else:
- results.n_failures += 1
- sys.stderr.write(
- "error: Unexpected failure of command: {}\n".format(
- " ".join(shlex.quote(c) for c in command)
- )
- )
+ status = _run_positive_test(command, out_filename)
+ passed = status == 0
+ results.n_failures += status
else: # Negative test
- with open(out_filename, "w") as stdout:
- with tempfile.TemporaryFile() as stderr:
- proc = subprocess.run(
- command, check=False, stdout=stdout, stderr=stderr
- )
-
- if proc.returncode != 0:
- passed = True
- else:
- results.n_failures += 1
- sys.stderr.write(
- "error: Unexpected success of command: {}\n".format(
- " ".join(shlex.quote(c) for c in command)
- )
- )
-
- # Check that an error message was printed
- stderr.seek(0, 2) # Seek to end
- if stderr.tell() == 0: # Empty
- sys.stderr.write(
- "error: No error message printed by command: {}\n".format(
- " ".join(shlex.quote(c) for c in command)
- )
- )
- result = 1
+ if NS_SERD + "triggersCheck" not in model[test]:
+ log_error("{} has no serd:triggersCheck".format(test_name))
+
+ check_names = []
+ if NS_SERD + "triggersCheck" in model[test]:
+ for check in model[test][NS_SERD + "triggersCheck"]:
+ check_names += [check[len(NS_CHECKS) :]]
+
+ # FIXME: doesn't work
+ # options = ["-I", test_uri]
+ options = []
+ for check_name in check_names:
+ options += ["-V", check_name]
+
+ options += ["-I", test_uri]
+
+ # options = ["-V", "instanceType", "-V", "propertyRange", "-V", "propertyDomain", "-V", check_name, "-I", test_uri]
+ # options = ["-V", "all", "-I", test_uri]
+ command = command_prefix + options + schemas + [test_path]
+
+ # Guard in case the test has no serd:triggersCheck
+ check_name = check_names[-1] if check_names else None
+ status = _run_negative_test(command, check_name, out_filename)
+ passed = status == 0
+ results.n_failures += status
# Write test report entry
if report_filename:
@@ -212,14 +198,12 @@ def validation_test_suite(
for test_class, instances in instances.items():
if test_class.startswith(ns_serd):
expected = 1 if "Negative" in test_class else 0
- run_tests(test_class, instances, expected, results)
+ run_tests(instances, expected, results)
# Print result summary
if results.n_failures > 0:
- sys.stderr.write(
- "error: {}/{} tests failed\n".format(
- results.n_failures, results.n_tests
- )
+ log_error(
+ "{}/{} tests failed\n".format(results.n_failures, results.n_tests)
)
else:
sys.stdout.write("All {} tests passed\n".format(results.n_tests))
@@ -238,7 +222,6 @@ def main():
parser.add_argument("--report", help="path to write result report to")
parser.add_argument("--serdi", default="serdi", help="path to serdi")
- parser.add_argument("--syntax", default="turtle", help="input syntax")
parser.add_argument("--wrapper", default="", help="executable wrapper")
parser.add_argument("manifest", help="test suite manifest.ttl file")
parser.add_argument("base_uri", help="base URI for tests")
@@ -252,7 +235,6 @@ def main():
args.schema,
args.base_uri,
args.report,
- args.syntax,
command_prefix,
)
@@ -260,9 +242,9 @@ def main():
if __name__ == "__main__":
try:
sys.exit(main())
- except subprocess.CalledProcessError as e:
- if e.stderr is not None:
- sys.stderr.write(e.stderr.decode("utf-8"))
+ except subprocess.CalledProcessError as error:
+ if error.stderr is not None:
+ sys.stderr.write(error.stderr.decode("utf-8"))
- sys.stderr.write("error: %s\n" % e)
- sys.exit(e.returncode)
+ log_error(str(error) + "\n")
+ sys.exit(error.returncode)
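For context, a typical invocation might look like the following; the paths, base URI, and the --schema flag are assumptions (args.schema is used above, but its add_argument call falls outside the hunks shown):

    ./test/run_validation_test_suite.py \
        --serdi build/serdi \
        --report validation.report \
        manifest.ttl http://example.org/tests/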