diff options
author | Jelmer Vernooij <jelmer@samba.org> | 2010-04-10 22:35:57 +0200 |
---|---|---|
committer | Jelmer Vernooij <jelmer@samba.org> | 2010-04-10 22:38:33 +0200 |
commit | 664eacc53a50415cd4f5534386501f640228f6c3 (patch) | |
tree | 866be7a0891f3553234933365fba748db8ab2032 /selftest | |
parent | 64bf8c400c8076316929cb6f06346bdf1c48d9d7 (diff) | |
download | samba-664eacc53a50415cd4f5534386501f640228f6c3.tar.gz samba-664eacc53a50415cd4f5534386501f640228f6c3.tar.bz2 samba-664eacc53a50415cd4f5534386501f640228f6c3.zip |
subunit: Don't abort when receiving test results from tests that weren't
announced.
Diffstat (limited to 'selftest')
-rw-r--r-- | selftest/subunithelper.py | 43 |
1 file changed, 32 insertions, 11 deletions
diff --git a/selftest/subunithelper.py b/selftest/subunithelper.py index 2c5fa318c5..545178ea99 100644 --- a/selftest/subunithelper.py +++ b/selftest/subunithelper.py @@ -68,18 +68,36 @@ def parse_results(msg_ops, statistics, fh): else: reason = None if result in ("success", "successful"): - open_tests.pop() #FIXME: Check that popped value == $testname - statistics['TESTS_EXPECTED_OK']+=1 - msg_ops.end_test(testname, "success", False, reason) + try: + open_tests.remove(testname) + except KeyError: + statistics['TESTS_ERROR']+=1 + msg_ops.end_test(testname, "error", True, + "Test was never started") + else: + statistics['TESTS_EXPECTED_OK']+=1 + msg_ops.end_test(testname, "success", False, reason) elif result in ("xfail", "knownfail"): - open_tests.pop() #FIXME: Check that popped value == $testname - statistics['TESTS_EXPECTED_FAIL']+=1 - msg_ops.end_test(testname, "xfail", False, reason) - expected_fail+=1 + try: + open_tests.remove(testname) + except KeyError: + statistics['TESTS_ERROR']+=1 + msg_ops.end_test(testname, "error", True, + "Test was never started") + else: + statistics['TESTS_EXPECTED_FAIL']+=1 + msg_ops.end_test(testname, "xfail", False, reason) + expected_fail+=1 elif result in ("failure", "fail"): - open_tests.pop() #FIXME: Check that popped value == $testname - statistics['TESTS_UNEXPECTED_FAIL']+=1 - msg_ops.end_test(testname, "failure", True, reason) + try: + open_tests.remove(testname) + except KeyError: + statistics['TESTS_ERROR']+=1 + msg_ops.end_test(testname, "error", True, + "Test was never started") + else: + statistics['TESTS_UNEXPECTED_FAIL']+=1 + msg_ops.end_test(testname, "failure", True, reason) elif result == "skip": statistics['TESTS_SKIP']+=1 # Allow tests to be skipped without prior announcement of test @@ -89,7 +107,10 @@ def parse_results(msg_ops, statistics, fh): msg_ops.end_test(testname, "skip", False, reason) elif result == "error": statistics['TESTS_ERROR']+=1 - open_tests.pop() #FIXME: Check that popped value == $testname 
+ try: + open_tests.remove(testname) + except KeyError: + pass msg_ops.end_test(testname, "error", True, reason) elif result == "skip-testsuite": msg_ops.skip_testsuite(testname) |