author     Jelmer Vernooij <jelmer@samba.org>    2010-03-31 03:19:18 +0200
committer  Jelmer Vernooij <jelmer@samba.org>    2010-03-31 03:19:18 +0200
commit     a8ac7fda573a924debf165d39eff3c1837240d4f (patch)
tree       6085df961a5fbcc57a240d7065d2fc4a258cf170 /lib/testtools/tests
parent     e4af3afd7ae3e39218b42a42d39c2ec10be9a642 (diff)
Put testtools directly under lib/ to make it easier to install from Samba 4.
Diffstat (limited to 'lib/testtools/tests')
-rw-r--r--  lib/testtools/tests/__init__.py            30
-rw-r--r--  lib/testtools/tests/helpers.py             67
-rw-r--r--  lib/testtools/tests/test_content.py        72
-rw-r--r--  lib/testtools/tests/test_content_type.py   34
-rw-r--r--  lib/testtools/tests/test_matchers.py       171
-rw-r--r--  lib/testtools/tests/test_runtest.py        185
-rw-r--r--  lib/testtools/tests/test_testresult.py     807
-rw-r--r--  lib/testtools/tests/test_testsuite.py      56
-rw-r--r--  lib/testtools/tests/test_testtools.py      755
9 files changed, 2177 insertions, 0 deletions
diff --git a/lib/testtools/tests/__init__.py b/lib/testtools/tests/__init__.py
new file mode 100644
index 0000000000..2cceba91e2
--- /dev/null
+++ b/lib/testtools/tests/__init__.py
@@ -0,0 +1,30 @@
+"""Tests for testtools itself."""
+
+# See README for copyright and licensing details.
+
+import unittest
+from testtools.tests import (
+ test_content,
+ test_content_type,
+ test_matchers,
+ test_runtest,
+ test_testtools,
+ test_testresult,
+ test_testsuite,
+ )
+
+
+def test_suite():
+ suites = []
+ modules = [
+ test_content,
+ test_content_type,
+ test_matchers,
+ test_runtest,
+ test_testresult,
+ test_testsuite,
+ test_testtools,
+ ]
+ for module in modules:
+ suites.append(getattr(module, 'test_suite')())
+ return unittest.TestSuite(suites)
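As a rough usage sketch (not part of this commit): the aggregate test_suite() above is an ordinary unittest suite factory, so it can be driven with the standard unittest runner, assuming lib/ (or an installed testtools) is on sys.path.

    # Minimal sketch: load the aggregate suite defined above and run it.
    import unittest
    from testtools.tests import test_suite

    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(test_suite())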
diff --git a/lib/testtools/tests/helpers.py b/lib/testtools/tests/helpers.py
new file mode 100644
index 0000000000..c4cf10c736
--- /dev/null
+++ b/lib/testtools/tests/helpers.py
@@ -0,0 +1,67 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+"""Helpers for tests."""
+
+import sys
+
+__metaclass__ = type
+__all__ = [
+ 'LoggingResult',
+ ]
+
+from testtools import TestResult
+
+
+try:
+ raise Exception
+except Exception:
+ an_exc_info = sys.exc_info()
+
+# Deprecated: This class's attributes are somewhat non-deterministic, which
+# leads to hard-to-predict tests (because Python upstream keeps changing things).
+class LoggingResult(TestResult):
+ """TestResult that logs its event to a list."""
+
+ def __init__(self, log):
+ self._events = log
+ super(LoggingResult, self).__init__()
+
+ def startTest(self, test):
+ self._events.append(('startTest', test))
+ super(LoggingResult, self).startTest(test)
+
+ def stopTest(self, test):
+ self._events.append(('stopTest', test))
+ super(LoggingResult, self).stopTest(test)
+
+ def addFailure(self, test, error):
+ self._events.append(('addFailure', test, error))
+ super(LoggingResult, self).addFailure(test, error)
+
+ def addError(self, test, error):
+ self._events.append(('addError', test, error))
+ super(LoggingResult, self).addError(test, error)
+
+ def addSkip(self, test, reason):
+ self._events.append(('addSkip', test, reason))
+ super(LoggingResult, self).addSkip(test, reason)
+
+ def addSuccess(self, test):
+ self._events.append(('addSuccess', test))
+ super(LoggingResult, self).addSuccess(test)
+
+ def startTestRun(self):
+ self._events.append('startTestRun')
+ super(LoggingResult, self).startTestRun()
+
+ def stopTestRun(self):
+ self._events.append('stopTestRun')
+ super(LoggingResult, self).stopTestRun()
+
+ def done(self):
+ self._events.append('done')
+ super(LoggingResult, self).done()
+
+# Note, the following three classes are different to LoggingResult by
+# being fully defined exact matches rather than supersets.
+from testtools.testresult.doubles import *
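For illustration only (not part of the diff): a LoggingResult as defined above can be handed to any test's run() to capture the sequence of result events; the Sample case below is made up for the example.

    # Minimal sketch: record the events a passing test sends to its result.
    from testtools import TestCase
    from testtools.tests.helpers import LoggingResult

    class Sample(TestCase):
        def test_nothing(self):
            pass

    events = []
    Sample('test_nothing').run(LoggingResult(events))
    # events is now roughly [('startTest', ...), ('addSuccess', ...), ('stopTest', ...)]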
diff --git a/lib/testtools/tests/test_content.py b/lib/testtools/tests/test_content.py
new file mode 100644
index 0000000000..1159362036
--- /dev/null
+++ b/lib/testtools/tests/test_content.py
@@ -0,0 +1,72 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+import unittest
+from testtools.content import Content, TracebackContent
+from testtools.content_type import ContentType
+from testtools.utils import _u
+from testtools.tests.helpers import an_exc_info
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+
+
+class TestContent(unittest.TestCase):
+
+ def test___init___None_errors(self):
+ self.assertRaises(ValueError, Content, None, None)
+ self.assertRaises(ValueError, Content, None, lambda: ["traceback"])
+ self.assertRaises(ValueError, Content,
+ ContentType("text", "traceback"), None)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertEqual(content_type, content.content_type)
+ self.assertEqual(["bytes"], list(content.iter_bytes()))
+
+ def test___eq__(self):
+ content_type = ContentType("foo", "bar")
+ content1 = Content(content_type, lambda: ["bytes"])
+ content2 = Content(content_type, lambda: ["bytes"])
+ content3 = Content(content_type, lambda: ["by", "tes"])
+ content4 = Content(content_type, lambda: ["by", "te"])
+ content5 = Content(ContentType("f", "b"), lambda: ["by", "tes"])
+ self.assertEqual(content1, content2)
+ self.assertEqual(content1, content3)
+ self.assertNotEqual(content1, content4)
+ self.assertNotEqual(content1, content5)
+
+ def test_iter_text_not_text_errors(self):
+ content_type = ContentType("foo", "bar")
+ content = Content(content_type, lambda: ["bytes"])
+ self.assertRaises(ValueError, content.iter_text)
+
+ def test_iter_text_decodes(self):
+ content_type = ContentType("text", "strange", {"charset": "utf8"})
+ content = Content(
+ content_type, lambda: [_u("bytes\xea").encode("utf8")])
+ self.assertEqual([_u("bytes\xea")], list(content.iter_text()))
+
+ def test_iter_text_default_charset_iso_8859_1(self):
+ content_type = ContentType("text", "strange")
+ text = _u("bytes\xea")
+ iso_version = text.encode("ISO-8859-1")
+ content = Content(content_type, lambda: [iso_version])
+ self.assertEqual([text], list(content.iter_text()))
+
+
+class TestTracebackContent(unittest.TestCase):
+
+ def test___init___None_errors(self):
+ self.assertRaises(ValueError, TracebackContent, None, None)
+
+ def test___init___sets_ivars(self):
+ content = TracebackContent(an_exc_info, self)
+ content_type = ContentType("text", "x-traceback",
+ {"language": "python", "charset": "utf8"})
+ self.assertEqual(content_type, content.content_type)
+ result = unittest.TestResult()
+ expected = result._exc_info_to_string(an_exc_info, self)
+ self.assertEqual(expected, ''.join(list(content.iter_text())))
diff --git a/lib/testtools/tests/test_content_type.py b/lib/testtools/tests/test_content_type.py
new file mode 100644
index 0000000000..dbefc21dec
--- /dev/null
+++ b/lib/testtools/tests/test_content_type.py
@@ -0,0 +1,34 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+import unittest
+from testtools.content_type import ContentType
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
+
+
+class TestContentType(unittest.TestCase):
+
+ def test___init___None_errors(self):
+ self.assertRaises(ValueError, ContentType, None, None)
+ self.assertRaises(ValueError, ContentType, None, "traceback")
+ self.assertRaises(ValueError, ContentType, "text", None)
+
+ def test___init___sets_ivars(self):
+ content_type = ContentType("foo", "bar")
+ self.assertEqual("foo", content_type.type)
+ self.assertEqual("bar", content_type.subtype)
+ self.assertEqual({}, content_type.parameters)
+
+ def test___init___with_parameters(self):
+ content_type = ContentType("foo", "bar", {"quux":"thing"})
+ self.assertEqual({"quux":"thing"}, content_type.parameters)
+
+ def test___eq__(self):
+ content_type1 = ContentType("foo", "bar", {"quux":"thing"})
+ content_type2 = ContentType("foo", "bar", {"quux":"thing"})
+ content_type3 = ContentType("foo", "bar", {"quux":"thing2"})
+ self.assertTrue(content_type1.__eq__(content_type2))
+ self.assertFalse(content_type1.__eq__(content_type3))
diff --git a/lib/testtools/tests/test_matchers.py b/lib/testtools/tests/test_matchers.py
new file mode 100644
index 0000000000..74b1ebc56a
--- /dev/null
+++ b/lib/testtools/tests/test_matchers.py
@@ -0,0 +1,171 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for matchers."""
+
+import doctest
+
+from testtools import (
+ Matcher, # check that Matcher is exposed at the top level for docs.
+ TestCase,
+ )
+from testtools.matchers import (
+ Annotate,
+ Equals,
+ DocTestMatches,
+ MatchesAny,
+ MatchesAll,
+ Not,
+ NotEquals,
+ )
+
+
+class TestMatchersInterface:
+
+ def test_matches_match(self):
+ matcher = self.matches_matcher
+ matches = self.matches_matches
+ mismatches = self.matches_mismatches
+ for candidate in matches:
+ self.assertEqual(None, matcher.match(candidate))
+ for candidate in mismatches:
+ mismatch = matcher.match(candidate)
+ self.assertNotEqual(None, mismatch)
+ self.assertNotEqual(None, getattr(mismatch, 'describe', None))
+
+ def test__str__(self):
+ # [(expected, object to __str__)].
+ examples = self.str_examples
+ for expected, matcher in examples:
+ self.assertThat(matcher, DocTestMatches(expected))
+
+ def test_describe_difference(self):
+ # [(expected, matchee, matcher), ...]
+ examples = self.describe_examples
+ for difference, matchee, matcher in examples:
+ mismatch = matcher.match(matchee)
+ self.assertEqual(difference, mismatch.describe())
+
+
+class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
+ matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
+ matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
+
+ str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
+ DocTestMatches("Ran 1 test in ...s")),
+ ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
+ ]
+
+ describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n'
+ ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
+ DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
+
+
+class TestDocTestMatchesSpecific(TestCase):
+
+ def test___init__simple(self):
+ matcher = DocTestMatches("foo")
+ self.assertEqual("foo\n", matcher.want)
+
+ def test___init__flags(self):
+ matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
+ self.assertEqual("bar\n", matcher.want)
+ self.assertEqual(doctest.ELLIPSIS, matcher.flags)
+
+
+class TestEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Equals(1)
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
+
+ describe_examples = [("1 != 2", 2, Equals(1))]
+
+
+class TestNotEqualsInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = NotEquals(1)
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
+
+ describe_examples = [("1 == 1", 1, NotEquals(1))]
+
+
+class TestNotInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = Not(Equals(1))
+ matches_matches = [2]
+ matches_mismatches = [1]
+
+ str_examples = [
+ ("Not(Equals(1))", Not(Equals(1))),
+ ("Not(Equals('1'))", Not(Equals('1')))]
+
+ describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
+
+
+class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
+ matches_matches = ["1", "2"]
+ matches_mismatches = ["3"]
+
+ str_examples = [(
+ "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
+ MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
+ ]
+
+ describe_examples = [("""Differences: [
+Expected:
+ 1
+Got:
+ 3
+
+Expected:
+ 2
+Got:
+ 3
+
+]
+""",
+ "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
+
+
+class TestMatchesAllInterface(TestCase, TestMatchersInterface):
+
+ matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
+ matches_matches = [3, 4]
+ matches_mismatches = [1, 2]
+
+ str_examples = [
+ ("MatchesAll(NotEquals(1), NotEquals(2))",
+ MatchesAll(NotEquals(1), NotEquals(2)))]
+
+ describe_examples = [("""Differences: [
+1 == 1
+]
+""",
+ 1, MatchesAll(NotEquals(1), NotEquals(2)))]
+
+
+class TestAnnotate(TestCase, TestMatchersInterface):
+
+ matches_matcher = Annotate("foo", Equals(1))
+ matches_matches = [1]
+ matches_mismatches = [2]
+
+ str_examples = [
+ ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
+
+ describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/tests/test_runtest.py b/lib/testtools/tests/test_runtest.py
new file mode 100644
index 0000000000..5c46ad1784
--- /dev/null
+++ b/lib/testtools/tests/test_runtest.py
@@ -0,0 +1,185 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for the RunTest single test execution logic."""
+
+from testtools import (
+ ExtendedToOriginalDecorator,
+ RunTest,
+ TestCase,
+ TestResult,
+ )
+from testtools.tests.helpers import ExtendedTestResult
+
+
+class TestRunTest(TestCase):
+
+ def make_case(self):
+ class Case(TestCase):
+ def test(self):
+ pass
+ return Case('test')
+
+ def test___init___short(self):
+ run = RunTest("bar")
+ self.assertEqual("bar", run.case)
+ self.assertEqual([], run.handlers)
+
+ def test__init____handlers(self):
+ handlers = [("quux", "baz")]
+ run = RunTest("bar", handlers)
+ self.assertEqual(handlers, run.handlers)
+
+ def test_run_with_result(self):
+ # test.run passes result down to _run_test_method.
+ log = []
+ class Case(TestCase):
+ def _run_test_method(self, result):
+ log.append(result)
+ case = Case('_run_test_method')
+ run = RunTest(case, lambda x: log.append(x))
+ result = TestResult()
+ run.run(result)
+ self.assertEqual(1, len(log))
+ self.assertEqual(result, log[0].decorated)
+
+ def test_run_no_result_manages_new_result(self):
+ log = []
+ run = RunTest(self.make_case(), lambda x: log.append(x) or x)
+ result = run.run()
+ self.assertIsInstance(result.decorated, TestResult)
+
+ def test__run_core_called(self):
+ case = self.make_case()
+ log = []
+ run = RunTest(case, lambda x: x)
+ run._run_core = lambda: log.append('foo')
+ run.run()
+ self.assertEqual(['foo'], log)
+
+ def test__run_user_does_not_catch_keyboard(self):
+ case = self.make_case()
+ def raises():
+ raise KeyboardInterrupt("yo")
+ run = RunTest(case, None)
+ run.result = ExtendedTestResult()
+ self.assertRaises(KeyboardInterrupt, run._run_user, raises)
+ self.assertEqual([], run.result._events)
+
+ def test__run_user_calls_onException(self):
+ case = self.make_case()
+ log = []
+ def handler(exc_info):
+ log.append("got it")
+ self.assertEqual(3, len(exc_info))
+ self.assertIsInstance(exc_info[1], KeyError)
+ self.assertIs(KeyError, exc_info[0])
+ case.addOnException(handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(KeyError, log_exc)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual(["got it", (run.result, e)], log)
+
+ def test__run_user_can_catch_Exception(self):
+ case = self.make_case()
+ e = Exception('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(Exception, log_exc)])
+ run.result = ExtendedTestResult()
+ status = run._run_user(raises)
+ self.assertEqual(run.exception_caught, status)
+ self.assertEqual([], run.result._events)
+ self.assertEqual([(run.result, e)], log)
+
+ def test__run_user_uncaught_Exception_raised(self):
+ case = self.make_case()
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertRaises(KeyError, run._run_user, raises)
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
+ case = self.make_case()
+ def broken_handler(exc_info):
+            # ValueError because that's what we know how to catch - and must
+            # not.
+ raise ValueError('boo')
+ case.addOnException(broken_handler)
+ e = KeyError('Yo')
+ def raises():
+ raise e
+ log = []
+ def log_exc(self, result, err):
+ log.append((result, err))
+ run = RunTest(case, [(ValueError, log_exc)])
+ run.result = ExtendedTestResult()
+ self.assertRaises(ValueError, run._run_user, raises)
+ self.assertEqual([], run.result._events)
+ self.assertEqual([], log)
+
+ def test__run_user_returns_result(self):
+ case = self.make_case()
+ def returns():
+ return 1
+ run = RunTest(case)
+ run.result = ExtendedTestResult()
+ self.assertEqual(1, run._run_user(returns))
+ self.assertEqual([], run.result._events)
+
+ def test__run_one_decorates_result(self):
+ log = []
+ class Run(RunTest):
+ def _run_prepared_result(self, result):
+ log.append(result)
+ return result
+ run = Run(self.make_case(), lambda x: x)
+ result = run._run_one('foo')
+ self.assertEqual([result], log)
+ self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
+ self.assertEqual('foo', result.decorated)
+
+ def test__run_prepared_result_calls_start_and_stop_test(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ run = RunTest(case, lambda x: x)
+ run.run(result)
+ self.assertEqual([
+ ('startTest', case),
+ ('addSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test__run_prepared_result_calls_stop_test_always(self):
+ result = ExtendedTestResult()
+ case = self.make_case()
+ def inner():
+ raise Exception("foo")
+ run = RunTest(case, lambda x: x)
+ run._run_core = inner
+ self.assertRaises(Exception, run.run, result)
+ self.assertEqual([
+ ('startTest', case),
+ ('stopTest', case),
+ ], result._events)
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/tests/test_testresult.py b/lib/testtools/tests/test_testresult.py
new file mode 100644
index 0000000000..df15b91244
--- /dev/null
+++ b/lib/testtools/tests/test_testresult.py
@@ -0,0 +1,807 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+"""Test TestResults and related things."""
+
+__metaclass__ = type
+
+import datetime
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+import doctest
+import sys
+import threading
+
+from testtools import (
+ ExtendedToOriginalDecorator,
+ MultiTestResult,
+ TestCase,
+ TestResult,
+ TextTestResult,
+ ThreadsafeForwardingResult,
+ testresult,
+ )
+from testtools.content import Content, ContentType
+from testtools.matchers import DocTestMatches
+from testtools.utils import _u, _b
+from testtools.tests.helpers import (
+ LoggingResult,
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ an_exc_info
+ )
+
+
+class TestTestResultContract(TestCase):
+ """Tests for the contract of TestResults."""
+
+ def test_addExpectedFailure(self):
+ # Calling addExpectedFailure(test, exc_info) completes ok.
+ result = self.makeResult()
+ result.addExpectedFailure(self, an_exc_info)
+
+ def test_addExpectedFailure_details(self):
+ # Calling addExpectedFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.addExpectedFailure(self, details={})
+
+ def test_addError_details(self):
+ # Calling addError(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.addError(self, details={})
+
+ def test_addFailure_details(self):
+ # Calling addFailure(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.addFailure(self, details={})
+
+ def test_addSkipped(self):
+ # Calling addSkip(test, reason) completes ok.
+ result = self.makeResult()
+ result.addSkip(self, _u("Skipped for some reason"))
+
+    def test_addSkipped_details(self):
+        # Calling addSkip(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.addSkip(self, details={})
+
+ def test_addUnexpectedSuccess(self):
+ # Calling addUnexpectedSuccess(test) completes ok.
+ result = self.makeResult()
+ result.addUnexpectedSuccess(self)
+
+    def test_addUnexpectedSuccess_details(self):
+        # Calling addUnexpectedSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.addUnexpectedSuccess(self, details={})
+
+    def test_addSuccess_details(self):
+        # Calling addSuccess(test, details=xxx) completes ok.
+ result = self.makeResult()
+ result.addSuccess(self, details={})
+
+ def test_startStopTestRun(self):
+ # Calling startTestRun completes ok.
+ result = self.makeResult()
+ result.startTestRun()
+ result.stopTestRun()
+
+
+class TestTestResultContract(TestTestResultContract):
+
+ def makeResult(self):
+ return TestResult()
+
+
+class TestMultiTestResultContract(TestTestResultContract):
+
+ def makeResult(self):
+ return MultiTestResult(TestResult(), TestResult())
+
+
+class TestTextTestResultContract(TestTestResultContract):
+
+ def makeResult(self):
+ return TextTestResult(StringIO())
+
+
+class TestThreadSafeForwardingResultContract(TestTestResultContract):
+
+ def makeResult(self):
+ result_semaphore = threading.Semaphore(1)
+ target = TestResult()
+ return ThreadsafeForwardingResult(target, result_semaphore)
+
+
+class TestTestResult(TestCase):
+ """Tests for `TestResult`."""
+
+ def makeResult(self):
+ """Make an arbitrary result for testing."""
+ return TestResult()
+
+ def test_addSkipped(self):
+ # Calling addSkip on a TestResult records the test that was skipped in
+ # its skip_reasons dict.
+ result = self.makeResult()
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for some reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self]},
+ result.skip_reasons)
+ result.addSkip(self, _u("Skipped for another reason"))
+ self.assertEqual({_u("Skipped for some reason"):[self, self],
+ _u("Skipped for another reason"):[self]},
+ result.skip_reasons)
+
+ def test_now_datetime_now(self):
+ result = self.makeResult()
+ olddatetime = testresult.real.datetime
+ def restore():
+ testresult.real.datetime = olddatetime
+ self.addCleanup(restore)
+ class Module:
+ pass
+ now = datetime.datetime.now()
+ stubdatetime = Module()
+ stubdatetime.datetime = Module()
+ stubdatetime.datetime.now = lambda: now
+ testresult.real.datetime = stubdatetime
+ # Calling _now() looks up the time.
+ self.assertEqual(now, result._now())
+ then = now + datetime.timedelta(0, 1)
+ # Set an explicit datetime, which gets returned from then on.
+ result.time(then)
+ self.assertNotEqual(now, result._now())
+ self.assertEqual(then, result._now())
+ # go back to looking it up.
+ result.time(None)
+ self.assertEqual(now, result._now())
+
+ def test_now_datetime_time(self):
+ result = self.makeResult()
+ now = datetime.datetime.now()
+ result.time(now)
+ self.assertEqual(now, result._now())
+
+
+class TestWithFakeExceptions(TestCase):
+
+ def makeExceptionInfo(self, exceptionFactory, *args, **kwargs):
+ try:
+ raise exceptionFactory(*args, **kwargs)
+ except:
+ return sys.exc_info()
+
+
+class TestMultiTestResult(TestWithFakeExceptions):
+ """Tests for `MultiTestResult`."""
+
+ def setUp(self):
+ TestWithFakeExceptions.setUp(self)
+ self.result1 = LoggingResult([])
+ self.result2 = LoggingResult([])
+ self.multiResult = MultiTestResult(self.result1, self.result2)
+
+ def assertResultLogsEqual(self, expectedEvents):
+ """Assert that our test results have received the expected events."""
+ self.assertEqual(expectedEvents, self.result1._events)
+ self.assertEqual(expectedEvents, self.result2._events)
+
+ def test_empty(self):
+ # Initializing a `MultiTestResult` doesn't do anything to its
+ # `TestResult`s.
+ self.assertResultLogsEqual([])
+
+ def test_startTest(self):
+ # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
+ # its `TestResult`s.
+ self.multiResult.startTest(self)
+ self.assertResultLogsEqual([('startTest', self)])
+
+ def test_stopTest(self):
+ # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
+ # its `TestResult`s.
+ self.multiResult.stopTest(self)
+ self.assertResultLogsEqual([('stopTest', self)])
+
+ def test_addSkipped(self):
+ # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
+ # results.
+ reason = _u("Skipped for some reason")
+ self.multiResult.addSkip(self, reason)
+ self.assertResultLogsEqual([('addSkip', self, reason)])
+
+ def test_addSuccess(self):
+ # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
+ # all its `TestResult`s.
+ self.multiResult.addSuccess(self)
+ self.assertResultLogsEqual([('addSuccess', self)])
+
+ def test_done(self):
+ # Calling `done` on a `MultiTestResult` calls `done` on all its
+ # `TestResult`s.
+ self.multiResult.done()
+ self.assertResultLogsEqual([('done')])
+
+ def test_addFailure(self):
+ # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
+ # all its `TestResult`s.
+ exc_info = self.makeExceptionInfo(AssertionError, 'failure')
+ self.multiResult.addFailure(self, exc_info)
+ self.assertResultLogsEqual([('addFailure', self, exc_info)])
+
+ def test_addError(self):
+ # Calling `addError` on a `MultiTestResult` calls `addError` on all
+ # its `TestResult`s.
+ exc_info = self.makeExceptionInfo(RuntimeError, 'error')
+ self.multiResult.addError(self, exc_info)
+ self.assertResultLogsEqual([('addError', self, exc_info)])
+
+ def test_startTestRun(self):
+ # Calling `startTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.startTestRun()
+ self.assertResultLogsEqual([('startTestRun')])
+
+ def test_stopTestRun(self):
+ # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
+ # `TestResult`s.
+ self.multiResult.stopTestRun()
+ self.assertResultLogsEqual([('stopTestRun')])
+
+
+class TestTextTestResult(TestWithFakeExceptions):
+ """Tests for `TextTestResult`."""
+
+ def setUp(self):
+ super(TestTextTestResult, self).setUp()
+ self.result = TextTestResult(StringIO())
+
+ def make_erroring_test(self):
+ class Test(TestCase):
+ def error(self):
+ 1/0
+ return Test("error")
+
+ def make_failing_test(self):
+ class Test(TestCase):
+ def failed(self):
+ self.fail("yo!")
+ return Test("failed")
+
+ def make_test(self):
+ class Test(TestCase):
+ def test(self):
+ pass
+ return Test("test")
+
+ def getvalue(self):
+ return self.result.stream.getvalue()
+
+ def test__init_sets_stream(self):
+ result = TextTestResult("fp")
+ self.assertEqual("fp", result.stream)
+
+ def reset_output(self):
+ self.result.stream = StringIO()
+
+ def test_startTestRun(self):
+ self.result.startTestRun()
+ self.assertEqual("Tests running...\n", self.getvalue())
+
+ def test_stopTestRun_count_many(self):
+ test = self.make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.result.stream = StringIO()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("Ran 2 tests in ...s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_single(self):
+ test = self.make_test()
+ self.result.startTestRun()
+ self.result.startTest(test)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("Ran 1 test in ...s\n\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_count_zero(self):
+ self.result.startTestRun()
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("Ran 0 tests in ...s\n\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_current_time(self):
+ test = self.make_test()
+ now = datetime.datetime.now()
+ self.result.time(now)
+ self.result.startTestRun()
+ self.result.startTest(test)
+ now = now + datetime.timedelta(0, 0, 0, 1)
+ self.result.time(now)
+ self.result.stopTest(test)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
+
+ def test_stopTestRun_successful(self):
+ self.result.startTestRun()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\n\nOK\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_failure(self):
+ test = self.make_failing_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_not_successful_error(self):
+ test = self.make_erroring_test()
+ self.result.startTestRun()
+ test.run(self.result)
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))
+
+ def test_stopTestRun_shows_details(self):
+ self.result.startTestRun()
+ self.make_erroring_test().run(self.result)
+ self.make_failing_test().run(self.result)
+ self.reset_output()
+ self.result.stopTestRun()
+ self.assertThat(self.getvalue(),
+ DocTestMatches("""...======================================================================
+ERROR: testtools.tests.test_testresult.Test.error
+----------------------------------------------------------------------
+Text attachment: traceback
+------------
+Traceback (most recent call last):
+ File "...testtools...runtest.py", line ..., in _run_user...
+ return fn(*args)
+ File "...testtools...testcase.py", line ..., in _run_test_method
+ testMethod()
+ File "...testtools...tests...test_testresult.py", line ..., in error
+ 1/0
+ZeroDivisionError: int... division or modulo by zero
+------------
+======================================================================
+FAIL: testtools.tests.test_testresult.Test.failed
+----------------------------------------------------------------------
+Text attachment: traceback
+------------
+Traceback (most recent call last):
+ File "...testtools...runtest.py", line ..., in _run_user...
+ return fn(*args)
+ File "...testtools...testcase.py", line ..., in _run_test_method
+ testMethod()
+ File "...testtools...tests...test_testresult.py", line ..., in failed
+ self.fail("yo!")
+AssertionError: yo!
+------------
+...""", doctest.ELLIPSIS))
+
+
+class TestThreadSafeForwardingResult(TestWithFakeExceptions):
+ """Tests for `MultiTestResult`."""
+
+ def setUp(self):
+ TestWithFakeExceptions.setUp(self)
+ self.result_semaphore = threading.Semaphore(1)
+ self.target = LoggingResult([])
+ self.result1 = ThreadsafeForwardingResult(self.target,
+ self.result_semaphore)
+
+ def test_nonforwarding_methods(self):
+ # startTest and stopTest are not forwarded because they need to be
+ # batched.
+ self.result1.startTest(self)
+ self.result1.stopTest(self)
+ self.assertEqual([], self.target._events)
+
+ def test_startTestRun(self):
+ self.result1.startTestRun()
+ self.result2 = ThreadsafeForwardingResult(self.target,
+ self.result_semaphore)
+ self.result2.startTestRun()
+ self.assertEqual(["startTestRun", "startTestRun"], self.target._events)
+
+ def test_stopTestRun(self):
+ self.result1.stopTestRun()
+ self.result2 = ThreadsafeForwardingResult(self.target,
+ self.result_semaphore)
+ self.result2.stopTestRun()
+ self.assertEqual(["stopTestRun", "stopTestRun"], self.target._events)
+
+ def test_forwarding_methods(self):
+ # error, failure, skip and success are forwarded in batches.
+ exc_info1 = self.makeExceptionInfo(RuntimeError, 'error')
+ self.result1.addError(self, exc_info1)
+ exc_info2 = self.makeExceptionInfo(AssertionError, 'failure')
+ self.result1.addFailure(self, exc_info2)
+ reason = _u("Skipped for some reason")
+ self.result1.addSkip(self, reason)
+ self.result1.addSuccess(self)
+ self.assertEqual([('startTest', self),
+ ('addError', self, exc_info1),
+ ('stopTest', self),
+ ('startTest', self),
+ ('addFailure', self, exc_info2),
+ ('stopTest', self),
+ ('startTest', self),
+ ('addSkip', self, reason),
+ ('stopTest', self),
+ ('startTest', self),
+ ('addSuccess', self),
+ ('stopTest', self),
+ ], self.target._events)
+
+
+class TestExtendedToOriginalResultDecoratorBase(TestCase):
+
+ def make_26_result(self):
+ self.result = Python26TestResult()
+ self.make_converter()
+
+ def make_27_result(self):
+ self.result = Python27TestResult()
+ self.make_converter()
+
+ def make_converter(self):
+ self.converter = ExtendedToOriginalDecorator(self.result)
+
+ def make_extended_result(self):
+ self.result = ExtendedTestResult()
+ self.make_converter()
+
+ def check_outcome_details(self, outcome):
+ """Call an outcome with a details dict to be passed through."""
+        # This dict is /not/ convertible - that's deliberate, as it should
+        # not hit the conversion code path.
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, details)], self.result._events)
+
+ def get_details_and_string(self):
+ """Get a details dict and expected string."""
+ text1 = lambda: [_b("1\n2\n")]
+ text2 = lambda: [_b("3\n4\n")]
+ bin1 = lambda: [_b("5\n")]
+ details = {'text 1': Content(ContentType('text', 'plain'), text1),
+ 'text 2': Content(ContentType('text', 'strange'), text2),
+ 'bin 1': Content(ContentType('application', 'binary'), bin1)}
+ return (details, "Binary content: bin 1\n"
+ "Text attachment: text 1\n------------\n1\n2\n"
+ "------------\nText attachment: text 2\n------------\n"
+ "3\n4\n------------\n")
+
+ def check_outcome_details_to_exec_info(self, outcome, expected=None):
+ """Call an outcome with a details dict to be made into exc_info."""
+        # The conversion is done using RemoteError and the string contents
+        # of the text types in the details dict.
+ if not expected:
+ expected = outcome
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ err = self.converter._details_to_exc_info(details)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_details_to_nothing(self, outcome, expected=None):
+ """Call an outcome with a details dict to be swallowed."""
+ if not expected:
+ expected = outcome
+ details = {'foo': 'bar'}
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_details_to_string(self, outcome):
+ """Call an outcome with a details dict to be stringified."""
+ details, err_str = self.get_details_and_string()
+ getattr(self.converter, outcome)(self, details=details)
+ self.assertEqual([(outcome, self, err_str)], self.result._events)
+
+ def check_outcome_exc_info(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self, err)], self.result._events)
+
+ def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome on a fallback works."""
+ # calling some outcome with the legacy exc_info style api (no keyword
+ # parameters) gets passed through.
+ if not expected:
+ expected = outcome
+ err = sys.exc_info()
+ getattr(self.converter, outcome)(self, err)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_nothing(self, outcome, expected=None):
+ """Check that calling a legacy outcome still works."""
+ if not expected:
+ expected = outcome
+ getattr(self.converter, outcome)(self)
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string_nothing(self, outcome, expected):
+ """Check that calling outcome with a string calls expected."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(expected, self)], self.result._events)
+
+ def check_outcome_string(self, outcome):
+ """Check that calling outcome with a string works."""
+ getattr(self.converter, outcome)(self, "foo")
+ self.assertEqual([(outcome, self, "foo")], self.result._events)
+
+
+class TestExtendedToOriginalResultDecorator(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_progress_py26(self):
+ self.make_26_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_py27(self):
+ self.make_27_result()
+ self.converter.progress(1, 2)
+
+ def test_progress_pyextended(self):
+ self.make_extended_result()
+ self.converter.progress(1, 2)
+ self.assertEqual([('progress', 1, 2)], self.result._events)
+
+ def test_shouldStop(self):
+ self.make_26_result()
+ self.assertEqual(False, self.converter.shouldStop)
+ self.converter.decorated.stop()
+ self.assertEqual(True, self.converter.shouldStop)
+
+ def test_startTest_py26(self):
+ self.make_26_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_py27(self):
+ self.make_27_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTest(self)
+ self.assertEqual([('startTest', self)], self.result._events)
+
+ def test_startTestRun_py26(self):
+ self.make_26_result()
+ self.converter.startTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_startTestRun_py27(self):
+ self.make_27_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_startTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.startTestRun()
+ self.assertEqual([('startTestRun',)], self.result._events)
+
+ def test_stopTest_py26(self):
+ self.make_26_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_py27(self):
+ self.make_27_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTest_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTest(self)
+ self.assertEqual([('stopTest', self)], self.result._events)
+
+ def test_stopTestRun_py26(self):
+ self.make_26_result()
+ self.converter.stopTestRun()
+ self.assertEqual([], self.result._events)
+
+ def test_stopTestRun_py27(self):
+ self.make_27_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_stopTestRun_pyextended(self):
+ self.make_extended_result()
+ self.converter.stopTestRun()
+ self.assertEqual([('stopTestRun',)], self.result._events)
+
+ def test_tags_py26(self):
+ self.make_26_result()
+ self.converter.tags(1, 2)
+
+ def test_tags_py27(self):
+ self.make_27_result()
+ self.converter.tags(1, 2)
+
+ def test_tags_pyextended(self):
+ self.make_extended_result()
+ self.converter.tags(1, 2)
+ self.assertEqual([('tags', 1, 2)], self.result._events)
+
+ def test_time_py26(self):
+ self.make_26_result()
+ self.converter.time(1)
+
+ def test_time_py27(self):
+ self.make_27_result()
+ self.converter.time(1)
+
+ def test_time_pyextended(self):
+ self.make_extended_result()
+ self.converter.time(1)
+ self.assertEqual([('time', 1)], self.result._events)
+
+
+class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addError'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_exc_info(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_exec_info(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertRaises(ValueError,
+ getattr(self.converter, self.outcome), self)
+
+
+class TestExtendedToOriginalAddFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addFailure'
+
+
+class TestExtendedToOriginalAddExpectedFailure(
+ TestExtendedToOriginalAddError):
+
+ outcome = 'addExpectedFailure'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
+
+
+
+class TestExtendedToOriginalAddSkip(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSkip'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_string(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_string_nothing(self.outcome, 'addSuccess')
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_string(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+ def test_outcome__no_details(self):
+ self.make_extended_result()
+ self.assertRaises(ValueError,
+ getattr(self.converter, self.outcome), self)
+
+
+class TestExtendedToOriginalAddSuccess(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ outcome = 'addSuccess'
+ expected = 'addSuccess'
+
+ def test_outcome_Original_py26(self):
+ self.make_26_result()
+ self.check_outcome_nothing(self.outcome, self.expected)
+
+ def test_outcome_Original_py27(self):
+ self.make_27_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Original_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_nothing(self.outcome)
+
+ def test_outcome_Extended_py26(self):
+ self.make_26_result()
+ self.check_outcome_details_to_nothing(self.outcome, self.expected)
+
+ def test_outcome_Extended_py27(self):
+ self.make_27_result()
+ self.check_outcome_details_to_nothing(self.outcome)
+
+ def test_outcome_Extended_pyextended(self):
+ self.make_extended_result()
+ self.check_outcome_details(self.outcome)
+
+
+class TestExtendedToOriginalAddUnexpectedSuccess(
+ TestExtendedToOriginalAddSuccess):
+
+ outcome = 'addUnexpectedSuccess'
+
+
+class TestExtendedToOriginalResultOtherAttributes(
+ TestExtendedToOriginalResultDecoratorBase):
+
+ def test_other_attribute(self):
+ class OtherExtendedResult:
+ def foo(self):
+ return 2
+ bar = 1
+ self.result = OtherExtendedResult()
+ self.make_converter()
+ self.assertEqual(1, self.converter.bar)
+ self.assertEqual(2, self.converter.foo())
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/tests/test_testsuite.py b/lib/testtools/tests/test_testsuite.py
new file mode 100644
index 0000000000..3f2f02758f
--- /dev/null
+++ b/lib/testtools/tests/test_testsuite.py
@@ -0,0 +1,56 @@
+# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
+
+"""Test ConcurrentTestSuite and related things."""
+
+__metaclass__ = type
+
+import unittest
+
+from testtools import (
+ ConcurrentTestSuite,
+ iterate_tests,
+ TestCase,
+ )
+from testtools.matchers import (
+ Equals,
+ )
+from testtools.tests.helpers import LoggingResult
+
+
+class TestConcurrentTestSuiteRun(TestCase):
+
+ def test_trivial(self):
+ log = []
+ result = LoggingResult(log)
+ class Sample(TestCase):
+ def __hash__(self):
+ return id(self)
+
+ def test_method1(self):
+ pass
+ def test_method2(self):
+ pass
+ test1 = Sample('test_method1')
+ test2 = Sample('test_method2')
+ original_suite = unittest.TestSuite([test1, test2])
+ suite = ConcurrentTestSuite(original_suite, self.split_suite)
+ suite.run(result)
+ test1 = log[0][1]
+ test2 = log[-1][1]
+ self.assertIsInstance(test1, Sample)
+ self.assertIsInstance(test2, Sample)
+ self.assertNotEqual(test1.id(), test2.id())
+ # We expect the start/outcome/stop to be grouped
+ expected = [('startTest', test1), ('addSuccess', test1),
+ ('stopTest', test1), ('startTest', test2), ('addSuccess', test2),
+ ('stopTest', test2)]
+ self.assertThat(log, Equals(expected))
+
+ def split_suite(self, suite):
+ tests = list(iterate_tests(suite))
+ return tests[0], tests[1]
+
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/testtools/tests/test_testtools.py b/lib/testtools/tests/test_testtools.py
new file mode 100644
index 0000000000..af1fd794c3
--- /dev/null
+++ b/lib/testtools/tests/test_testtools.py
@@ -0,0 +1,755 @@
+# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
+
+"""Tests for extensions to the base test library."""
+
+import sys
+import unittest
+
+from testtools import (
+ TestCase,
+ clone_test_with_new_id,
+ content,
+ skip,
+ skipIf,
+ skipUnless,
+ testcase,
+ )
+from testtools.matchers import (
+ Equals,
+ )
+from testtools.tests.helpers import (
+ an_exc_info,
+ LoggingResult,
+ Python26TestResult,
+ Python27TestResult,
+ ExtendedTestResult,
+ )
+
+
+class TestEquality(TestCase):
+ """Test `TestCase`'s equality implementation."""
+
+ def test_identicalIsEqual(self):
+ # TestCase's are equal if they are identical.
+ self.assertEqual(self, self)
+
+ def test_nonIdenticalInUnequal(self):
+ # TestCase's are not equal if they are not identical.
+ self.assertNotEqual(TestCase(methodName='run'),
+ TestCase(methodName='skip'))
+
+
+class TestAssertions(TestCase):
+ """Test assertions in TestCase."""
+
+ def raiseError(self, exceptionFactory, *args, **kwargs):
+ raise exceptionFactory(*args, **kwargs)
+
+ def test_formatTypes_single(self):
+ # Given a single class, _formatTypes returns the name.
+ class Foo:
+ pass
+ self.assertEqual('Foo', self._formatTypes(Foo))
+
+ def test_formatTypes_multiple(self):
+ # Given multiple types, _formatTypes returns the names joined by
+ # commas.
+ class Foo:
+ pass
+ class Bar:
+ pass
+ self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
+
+ def test_assertRaises(self):
+ # assertRaises asserts that a callable raises a particular exception.
+ self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
+
+ def test_assertRaises_fails_when_no_error_raised(self):
+ # assertRaises raises self.failureException when it's passed a
+ # callable that raises no error.
+ ret = ('orange', 42)
+ try:
+ self.assertRaises(RuntimeError, lambda: ret)
+ except self.failureException:
+ # We expected assertRaises to raise this exception.
+ e = sys.exc_info()[1]
+ self.assertEqual(
+ '%s not raised, %r returned instead.'
+ % (self._formatTypes(RuntimeError), ret), str(e))
+ else:
+ self.fail('Expected assertRaises to fail, but it did not.')
+
+ def test_assertRaises_fails_when_different_error_raised(self):
+ # assertRaises re-raises an exception that it didn't expect.
+ self.assertRaises(
+ ZeroDivisionError,
+ self.assertRaises,
+ RuntimeError, self.raiseError, ZeroDivisionError)
+
+ def test_assertRaises_returns_the_raised_exception(self):
+ # assertRaises returns the exception object that was raised. This is
+ # useful for testing that exceptions have the right message.
+
+ # This contraption stores the raised exception, so we can compare it
+ # to the return value of assertRaises.
+ raisedExceptions = []
+ def raiseError():
+ try:
+ raise RuntimeError('Deliberate error')
+ except RuntimeError:
+ raisedExceptions.append(sys.exc_info()[1])
+ raise
+
+ exception = self.assertRaises(RuntimeError, raiseError)
+ self.assertEqual(1, len(raisedExceptions))
+ self.assertTrue(
+ exception is raisedExceptions[0],
+ "%r is not %r" % (exception, raisedExceptions[0]))
+
+ def test_assertRaises_with_multiple_exceptions(self):
+ # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
+ # function raises one of ExceptionTwo or ExceptionOne.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[0])
+ self.assertRaises(
+ expectedExceptions, self.raiseError, expectedExceptions[1])
+
+ def test_assertRaises_with_multiple_exceptions_failure_mode(self):
+ # If assertRaises is called expecting one of a group of exceptions and
+ # a callable that doesn't raise an exception, then fail with an
+ # appropriate error message.
+ expectedExceptions = (RuntimeError, ZeroDivisionError)
+ failure = self.assertRaises(
+ self.failureException,
+ self.assertRaises, expectedExceptions, lambda: None)
+ self.assertEqual(
+ '%s not raised, None returned instead.'
+ % self._formatTypes(expectedExceptions), str(failure))
+
+ def assertFails(self, message, function, *args, **kwargs):
+ """Assert that function raises a failure with the given message."""
+ failure = self.assertRaises(
+ self.failureException, function, *args, **kwargs)
+ self.assertEqual(message, str(failure))
+
+ def test_assertIn_success(self):
+ # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
+ self.assertIn(3, range(10))
+ self.assertIn('foo', 'foo bar baz')
+ self.assertIn('foo', 'foo bar baz'.split())
+
+ def test_assertIn_failure(self):
+ # assertIn(needle, haystack) fails the test when 'needle' is not in
+ # 'haystack'.
+ self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
+ self.assertFails(
+ '%r not in %r' % ('qux', 'foo bar baz'),
+ self.assertIn, 'qux', 'foo bar baz')
+
+ def test_assertNotIn_success(self):
+ # assertNotIn(needle, haystack) asserts that 'needle' is not in
+ # 'haystack'.
+ self.assertNotIn(3, [0, 1, 2])
+ self.assertNotIn('qux', 'foo bar baz')
+
+ def test_assertNotIn_failure(self):
+ # assertNotIn(needle, haystack) fails the test when 'needle' is in
+ # 'haystack'.
+ self.assertFails('3 in [1, 2, 3]', self.assertNotIn, 3, [1, 2, 3])
+ self.assertFails(
+ '%r in %r' % ('foo', 'foo bar baz'),
+ self.assertNotIn, 'foo', 'foo bar baz')
+
+ def test_assertIsInstance(self):
+ # assertIsInstance asserts that an object is an instance of a class.
+
+ class Foo:
+ """Simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, Foo)
+
+ def test_assertIsInstance_multiple_classes(self):
+ # assertIsInstance asserts that an object is an instance of one of a
+ # group of classes.
+
+ class Foo:
+ """Simple class for testing assertIsInstance."""
+
+ class Bar:
+ """Another simple class for testing assertIsInstance."""
+
+ foo = Foo()
+ self.assertIsInstance(foo, (Foo, Bar))
+ self.assertIsInstance(Bar(), (Foo, Bar))
+
+ def test_assertIsInstance_failure(self):
+ # assertIsInstance(obj, klass) fails the test when obj is not an
+ # instance of klass.
+
+ class Foo:
+ """Simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ '42 is not an instance of %s' % self._formatTypes(Foo),
+ self.assertIsInstance, 42, Foo)
+
+ def test_assertIsInstance_failure_multiple_classes(self):
+ # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
+ # not an instance of klass1 or klass2.
+
+ class Foo:
+ """Simple class for testing assertIsInstance."""
+
+ class Bar:
+ """Another simple class for testing assertIsInstance."""
+
+ self.assertFails(
+ '42 is not an instance of %s' % self._formatTypes([Foo, Bar]),
+ self.assertIsInstance, 42, (Foo, Bar))
+
+ def test_assertIs(self):
+ # assertIs asserts that an object is identical to another object.
+ self.assertIs(None, None)
+ some_list = [42]
+ self.assertIs(some_list, some_list)
+ some_object = object()
+ self.assertIs(some_object, some_object)
+
+ def test_assertIs_fails(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another.
+ self.assertFails('None is not 42', self.assertIs, None, 42)
+ self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
+
+ def test_assertIs_fails_with_message(self):
+ # assertIs raises assertion errors if one object is not identical to
+ # another, and includes a user-supplied message, if it's provided.
+ self.assertFails(
+ 'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
+
+ def test_assertIsNot(self):
+ # assertIsNot asserts that an object is not identical to another
+ # object.
+ self.assertIsNot(None, 42)
+ self.assertIsNot([42], [42])
+ self.assertIsNot(object(), object())
+
+ def test_assertIsNot_fails(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another.
+ self.assertFails('None is None', self.assertIsNot, None, None)
+ some_list = [42]
+ self.assertFails(
+ '[42] is [42]', self.assertIsNot, some_list, some_list)
+
+ def test_assertIsNot_fails_with_message(self):
+ # assertIsNot raises assertion errors if one object is identical to
+ # another, and includes a user-supplied message if it's provided.
+ self.assertFails(
+ 'None is None: foo bar', self.assertIsNot, None, None, "foo bar")
+
+ def test_assertThat_matches_clean(self):
+ class Matcher:
+ def match(self, foo):
+ return None
+ self.assertThat("foo", Matcher())
+
+ def test_assertThat_mismatch_raises_description(self):
+ calls = []
+ class Mismatch:
+ def __init__(self, thing):
+ self.thing = thing
+ def describe(self):
+ calls.append(('describe_diff', self.thing))
+ return "object is not a thing"
+ class Matcher:
+ def match(self, thing):
+ calls.append(('match', thing))
+ return Mismatch(thing)
+ def __str__(self):
+ calls.append(('__str__',))
+ return "a description"
+ class Test(TestCase):
+ def test(self):
+ self.assertThat("foo", Matcher())
+ result = Test("test").run()
+ self.assertEqual([
+ ('match', "foo"),
+ ('describe_diff', "foo"),
+ ('__str__',),
+ ], calls)
+ self.assertFalse(result.wasSuccessful())
+
+
+class TestAddCleanup(TestCase):
+ """Tests for TestCase.addCleanup."""
+
+ class LoggingTest(TestCase):
+ """A test that logs calls to setUp, runTest and tearDown."""
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._calls = ['setUp']
+
+ def brokenSetUp(self):
+            # A setUp that deliberately fails.
+ self._calls = ['brokenSetUp']
+ raise RuntimeError('Deliberate Failure')
+
+ def runTest(self):
+ self._calls.append('runTest')
+
+ def tearDown(self):
+ self._calls.append('tearDown')
+ TestCase.tearDown(self)
+
+ def setUp(self):
+ TestCase.setUp(self)
+ self._result_calls = []
+ self.test = TestAddCleanup.LoggingTest('runTest')
+ self.logging_result = LoggingResult(self._result_calls)
+
+ def assertErrorLogEqual(self, messages):
+ self.assertEqual(messages, [call[0] for call in self._result_calls])
+
+ def assertTestLogEqual(self, messages):
+ """Assert that the call log equals 'messages'."""
+ case = self._result_calls[0][1]
+ self.assertEqual(messages, case._calls)
+
+ def logAppender(self, message):
+ """A cleanup that appends 'message' to the tests log.
+
+ Cleanups are callables that are added to a test by addCleanup. To
+ verify that our cleanups run in the right order, we add strings to a
+ list that acts as a log. This method returns a cleanup that will add
+ the given message to that log when run.
+ """
+ self.test._calls.append(message)
+
+ def test_fixture(self):
+ # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
+ # This test doesn't test addCleanup itself, it just sanity checks the
+ # fixture.
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanup_run_before_tearDown(self):
+ # Cleanup functions added with 'addCleanup' are called before tearDown
+ # runs.
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
+
+ def test_add_cleanup_called_if_setUp_fails(self):
+ # Cleanup functions added with 'addCleanup' are called even if setUp
+ # fails. Note that tearDown has a different behavior: it is only
+ # called when setUp succeeds.
+ self.test.setUp = self.test.brokenSetUp
+ self.test.addCleanup(self.logAppender, 'cleanup')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
+
+ def test_addCleanup_called_in_reverse_order(self):
+ # Cleanup functions added with 'addCleanup' are called in reverse
+ # order.
+ #
+ # One of the main uses of addCleanup is to dynamically create
+ # resources that need some sort of explicit tearDown. Often one
+ # resource will be created in terms of another, e.g.,
+ # self.first = self.makeFirst()
+ # self.second = self.makeSecond(self.first)
+ #
+ # When this happens, we generally want to clean up the second resource
+ # before the first one, since the second depends on the first. (A
+ # standalone sketch of this pattern follows this class.)
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_tearDown_runs_after_cleanup_failure(self):
+ # tearDown runs even if a cleanup function fails.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
+
+ def test_cleanups_continue_running_after_error(self):
+ # All cleanups are always run, even if one or two of them fail.
+ self.test.addCleanup(self.logAppender, 'first')
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(self.logAppender, 'second')
+ self.test.run(self.logging_result)
+ self.assertTestLogEqual(
+ ['setUp', 'runTest', 'tearDown', 'second', 'first'])
+
+ def test_error_in_cleanups_are_captured(self):
+ # If a cleanup raises an error, we want to record it and fail the
+ # test, even though we go on to run other cleanups.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
+
+ def test_keyboard_interrupt_not_caught(self):
+ # If a cleanup raises KeyboardInterrupt, it gets reraised.
+ def raiseKeyboardInterrupt():
+ raise KeyboardInterrupt()
+ self.test.addCleanup(raiseKeyboardInterrupt)
+ self.assertRaises(
+ KeyboardInterrupt, self.test.run, self.logging_result)
+
+ def test_multipleErrorsReported(self):
+ # Errors from all failing cleanups are reported.
+ self.test.addCleanup(lambda: 1/0)
+ self.test.addCleanup(lambda: 1/0)
+ self.test.run(self.logging_result)
+ self.assertErrorLogEqual(
+ ['startTest', 'addError', 'addError', 'stopTest'])
+
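+ # For illustration (a hypothetical helper, not one of the tests above): the
+ # dependent-resource pattern described in
+ # test_addCleanup_called_in_reverse_order. Cleanups run last-in first-out,
+ # so the resource registered second is cleaned up before the resource it
+ # depends on.
+ def _register_dependent_cleanups(test, log):
+ """Register two cleanups on 'test'; 'log' records the teardown order."""
+ test.addCleanup(log.append, 'first cleaned up') # runs second
+ test.addCleanup(log.append, 'second cleaned up') # runs first
+ # After test.run(...), log == ['second cleaned up', 'first cleaned up'].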
+
+class TestWithDetails(TestCase):
+
+ def assertDetailsProvided(self, case, expected_outcome, expected_keys):
+ """Assert that when case is run, details are provided to the result.
+
+ :param case: A TestCase to run.
+ :param expected_outcome: The name of the result method expected to be
+ called, e.g. 'addError'.
+ :param expected_keys: The keys to look for.
+ """
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ expected = [
+ ('startTest', case),
+ (expected_outcome, case),
+ ('stopTest', case),
+ ]
+ self.assertEqual(3, len(result._events))
+ self.assertEqual(expected[0], result._events[0])
+ self.assertEqual(expected[1], result._events[1][0:2])
+ # Checking that the traceback is right is rather tricky. doctest-style
+ # line matching would help, but we settle for checking the detail keys.
+ self.assertEqual(sorted(expected_keys),
+ sorted(result._events[1][2].keys()))
+ self.assertEqual(expected[-1], result._events[-1])
+
+ def get_content(self):
+ return content.Content(
+ content.ContentType("text", "foo"), lambda: ['foo'])
+
+
+class TestExpectedFailure(TestWithDetails):
+ """Tests for expected failures and unexpected successess."""
+
+ def make_unexpected_case(self):
+ class Case(TestCase):
+ def test(self):
+ raise testcase._UnexpectedSuccess
+ case = Case('test')
+ return case
+
+ def test_raising__UnexpectedSuccess_py27(self):
+ case = self.make_unexpected_case()
+ result = Python27TestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case),
+ ('stopTest', case),
+ ], result._events)
+
+ def test_raising__UnexpectedSuccess_extended(self):
+ case = self.make_unexpected_case()
+ result = ExtendedTestResult()
+ case.run(result)
+ case = result._events[0][1]
+ self.assertEqual([
+ ('startTest', case),
+ ('addUnexpectedSuccess', case, {}),
+ ('stopTest', case),
+ ], result._events)
+
+ def make_xfail_case_xfails(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 0)
+ case = Case('test')
+ return case
+
+ def make_xfail_case_succeeds(self):
+ content = self.get_content()
+ class Case(TestCase):
+ def test(self):
+ self.addDetail("foo", content)
+ self.expectFailure("we are sad", self.assertEqual,
+ 1, 1)
+ case = Case('test')
+ return case
+
+ def test_expectFailure_KnownFailure_extended(self):
+ case = self.make_xfail_case_xfails()
+ self.assertDetailsProvided(case, "addExpectedFailure",
+ ["foo", "traceback", "reason"])
+
+ def test_expectFailure_KnownFailure_unexpected_success(self):
+ case = self.make_xfail_case_succeeds()
+ self.assertDetailsProvided(case, "addUnexpectedSuccess",
+ ["foo", "reason"])
+
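+ # For illustration (a hypothetical helper, not one of the tests above): the
+ # expectFailure API exercised by TestExpectedFailure above. The wrapped
+ # assertion is expected to fail; if it does, the result records an expected
+ # failure, and if it unexpectedly passes, the result records an unexpected
+ # success.
+ def _example_expect_failure(test):
+ """Mark a known-bad comparison on 'test' as an expected failure."""
+ test.expectFailure(
+ "known bug: the values still differ", test.assertEqual, 1, 0)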
+
+class TestUniqueFactories(TestCase):
+ """Tests for getUniqueString and getUniqueInteger."""
+
+ def test_getUniqueInteger(self):
+ # getUniqueInteger returns a new integer on each call, counting up
+ # from 1.
+ one = self.getUniqueInteger()
+ self.assertEqual(1, one)
+ two = self.getUniqueInteger()
+ self.assertEqual(2, two)
+
+ def test_getUniqueString(self):
+ # getUniqueString returns the current test id followed by a unique
+ # integer.
+ name_one = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 1), name_one)
+ name_two = self.getUniqueString()
+ self.assertEqual('%s-%d' % (self.id(), 2), name_two)
+
+ def test_getUniqueString_prefix(self):
+ # If getUniqueString is given an argument, it uses that argument as
+ # the prefix of the unique string, rather than the test id.
+ name_one = self.getUniqueString('foo')
+ self.assertThat(name_one, Equals('foo-1'))
+ name_two = self.getUniqueString('bar')
+ self.assertThat(name_two, Equals('bar-2'))
+
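+ # For illustration (a hypothetical helper, not one of the tests above): the
+ # unique-value factories exercised by TestUniqueFactories above are handy
+ # for building test data whose exact value does not matter.
+ def _make_example_user(test):
+ """Return a (name, uid) pair that is unique within 'test'."""
+ name = test.getUniqueString('user') # 'user-1', then 'user-2', ...
+ uid = test.getUniqueInteger() # 1, then 2, ...
+ return name, uid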
+
+class TestCloneTestWithNewId(TestCase):
+ """Tests for clone_test_with_new_id."""
+
+ def test_clone_test_with_new_id(self):
+ class FooTestCase(TestCase):
+ def test_foo(self):
+ pass
+ test = FooTestCase('test_foo')
+ oldName = test.id()
+ newName = self.getUniqueString()
+ newTest = clone_test_with_new_id(test, newName)
+ self.assertEqual(newName, newTest.id())
+ self.assertEqual(oldName, test.id(),
+ "the original test instance should be unchanged.")
+
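+ # For illustration (a hypothetical helper, not one of the tests above):
+ # clone_test_with_new_id, exercised by TestCloneTestWithNewId above, can be
+ # used to parametrise one test into several uniquely named copies.
+ def _clone_for_scenarios(test, scenario_names):
+ """Return one clone of 'test' per name in 'scenario_names'."""
+ return [clone_test_with_new_id(test, '%s(%s)' % (test.id(), name))
+ for name in scenario_names]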
+
+class TestDetailsProvided(TestWithDetails):
+
+ def test_addDetail(self):
+ mycontent = self.get_content()
+ self.addDetail("foo", mycontent)
+ details = self.getDetails()
+ self.assertEqual({"foo": mycontent}, details)
+
+ def test_addError(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ 1/0
+ self.assertDetailsProvided(Case("test"), "addError",
+ ["foo", "traceback"])
+
+ def test_addFailure(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.fail('yo')
+ self.assertDetailsProvided(Case("test"), "addFailure",
+ ["foo", "traceback"])
+
+ def test_addSkip(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.skip('yo')
+ self.assertDetailsProvided(Case("test"), "addSkip",
+ ["foo", "reason"])
+
+ def test_addSuccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ self.assertDetailsProvided(Case("test"), "addSuccess",
+ ["foo"])
+
+ def test_addUnexpectedSuccess(self):
+ class Case(TestCase):
+ def test(this):
+ this.addDetail("foo", self.get_content())
+ raise testcase._UnexpectedSuccess()
+ self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
+ ["foo"])
+
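+ # For illustration (a hypothetical helper, not one of the tests above): how
+ # a test attaches extra diagnostic data, as exercised by TestDetailsProvided
+ # above. A Content object pairs a MIME-style ContentType with a callable
+ # returning an iterable of text chunks.
+ def _attach_example_detail(test, text):
+ """Attach 'text' to 'test' as a detail named 'notes'."""
+ test.addDetail(
+ "notes",
+ content.Content(
+ content.ContentType("text", "plain"), lambda: [text]))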
+
+class TestSetupTearDown(TestCase):
+
+ def test_setUpNotCalled(self):
+ class DoesnotcallsetUp(TestCase):
+ def setUp(self):
+ pass
+ def test_method(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcallsetUp('test_method').run(result)
+ self.assertEqual(1, len(result.errors))
+
+ def test_tearDownNotCalled(self):
+ class DoesnotcalltearDown(TestCase):
+ def test_method(self):
+ pass
+ def tearDown(self):
+ pass
+ result = unittest.TestResult()
+ DoesnotcalltearDown('test_method').run(result)
+ self.assertEqual(1, len(result.errors))
+
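+ # For illustration (a hypothetical example, not one of the tests above): the
+ # tests in TestSetupTearDown show that testtools reports an error when an
+ # override of setUp or tearDown does not call the base implementation.
+ # Overrides should upcall, as below.
+ class _UpcallingExample(TestCase):
+ # (No test methods: this class only illustrates the required upcalls.)
+ def setUp(self):
+ TestCase.setUp(self)
+ self.resource = object()
+ def tearDown(self):
+ del self.resource
+ TestCase.tearDown(self)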
+
+class TestSkipping(TestCase):
+ """Tests for skipping of tests functionality."""
+
+ def test_skip_causes_skipException(self):
+ self.assertRaises(self.skipException, self.skip, "Skip this test")
+
+ def test_skip_without_reason_works(self):
+ class Test(TestCase):
+ def test(self):
+ raise self.skipException()
+ case = Test("test")
+ result = ExtendedTestResult()
+ case.run(result)
+ self.assertEqual('addSkip', result._events[1][0])
+ self.assertEqual('no reason given.',
+ ''.join(result._events[1][2]['reason'].iter_text()))
+
+ def test_skipException_in_setup_calls_result_addSkip(self):
+ class TestThatRaisesInSetUp(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ self.skip("skipping this test")
+ def test_that_passes(self):
+ pass
+ calls = []
+ result = LoggingResult(calls)
+ test = TestThatRaisesInSetUp("test_that_passes")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "Text attachment: reason\n------------\n"
+ "skipping this test\n------------\n"), ('stopTest', case)],
+ calls)
+
+ def test_skipException_in_test_method_calls_result_addSkip(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ self.skip("skipping this test")
+ result = Python27TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ case = result._events[0][1]
+ self.assertEqual([('startTest', case),
+ ('addSkip', case, "Text attachment: reason\n------------\n"
+ "skipping this test\n------------\n"), ('stopTest', case)],
+ result._events)
+
+ def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
+ class SkippingTest(TestCase):
+ def setUp(self):
+ TestCase.setUp(self)
+ raise self.skipException("skipping this test")
+ def test_that_raises_skipException(self):
+ pass
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skip_with_old_result_object_calls_addSuccess(self):
+ class SkippingTest(TestCase):
+ def test_that_raises_skipException(self):
+ raise self.skipException("skipping this test")
+ result = Python26TestResult()
+ test = SkippingTest("test_that_raises_skipException")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skip_decorator(self):
+ class SkippingTest(TestCase):
+ @skip("skipping this test")
+ def test_that_is_decorated_with_skip(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skip")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipIf_decorator(self):
+ class SkippingTest(TestCase):
+ @skipIf(True, "skipping this test")
+ def test_that_is_decorated_with_skipIf(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipIf")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
+ def test_skipUnless_decorator(self):
+ class SkippingTest(TestCase):
+ @skipUnless(False, "skipping this test")
+ def test_that_is_decorated_with_skipUnless(self):
+ self.fail()
+ result = Python26TestResult()
+ test = SkippingTest("test_that_is_decorated_with_skipUnless")
+ test.run(result)
+ self.assertEqual('addSuccess', result._events[1][0])
+
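+ # For illustration (a hypothetical helper, not one of the tests above): the
+ # decorators exercised by TestSkipping above are ordinary callables, so a
+ # suite can build its own conditional-skip helpers on top of them.
+ def _skip_unless_has_attr(obj, name):
+ """Return a decorator that skips a test unless 'obj' has attribute 'name'."""
+ return skipUnless(hasattr(obj, name), "%r has no attribute %r" % (obj, name))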
+
+class TestOnException(TestCase):
+
+ def test_default_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.onException(an_exc_info)
+ events.append(True)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([True]))
+
+ def test_added_handler_works(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.append)
+ self.onException(an_exc_info)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([an_exc_info]))
+
+ def test_handler_that_raises_is_not_caught(self):
+ events = []
+ class Case(TestCase):
+ def method(self):
+ self.addOnException(events.index)
+ self.assertRaises(ValueError, self.onException, an_exc_info)
+ case = Case("method")
+ case.run()
+ self.assertThat(events, Equals([]))
+
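+ # For illustration (a hypothetical helper, not one of the tests above): the
+ # addOnException hook exercised by TestOnException above can be used to
+ # gather extra diagnostics whenever a test reports an exception.
+ def _attach_state_on_exception(test, get_state):
+ """Attach a 'state' detail to 'test' whenever it reports an exception."""
+ def handler(exc_info):
+ test.addDetail(
+ "state",
+ content.Content(
+ content.ContentType("text", "plain"), lambda: [get_state()]))
+ test.addOnException(handler)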
+
+def test_suite():
+ from unittest import TestLoader
+ return TestLoader().loadTestsFromName(__name__)