author    Jeremy Allison <jra@samba.org>  2010-03-31 10:01:03 -0700
committer Jeremy Allison <jra@samba.org>  2010-03-31 10:01:03 -0700
commit    2e839a636b2ea3f4d8dfcf5a8e99d9725787ba61 (patch)
tree      42f219978f7d07d8fa196cb9ebd9db7be971450d /lib/subunit
parent    f58d02dbeeeba037ee79fba93a707e959e90ffa3 (diff)
parent    6f30b9a6ff57ca6112e6319c64c411d2bf09be79 (diff)
Merge branch 'master' of ssh://git.samba.org/data/git/samba
Diffstat (limited to 'lib/subunit')
-rw-r--r-- lib/subunit/Apache-2.0 | 202
-rw-r--r-- lib/subunit/BSD | 26
-rw-r--r-- lib/subunit/COPYING | 36
-rw-r--r-- lib/subunit/INSTALL | 25
-rw-r--r-- lib/subunit/Makefile.am | 136
-rw-r--r-- lib/subunit/NEWS | 174
-rw-r--r-- lib/subunit/README | 217
-rw-r--r-- lib/subunit/c++/README | 50
-rw-r--r-- lib/subunit/c++/SubunitTestProgressListener.cpp | 63
-rw-r--r-- lib/subunit/c++/SubunitTestProgressListener.h | 56
-rw-r--r-- lib/subunit/c/README | 68
-rw-r--r-- lib/subunit/c/include/subunit/child.h | 79
-rw-r--r-- lib/subunit/c/lib/child.c | 82
-rw-r--r-- lib/subunit/c/tests/test_child.c | 192
-rw-r--r-- lib/subunit/configure.ac | 75
-rwxr-xr-x lib/subunit/filters/subunit-filter | 105
-rwxr-xr-x lib/subunit/filters/subunit-ls | 93
-rwxr-xr-x lib/subunit/filters/subunit-notify | 65
-rwxr-xr-x lib/subunit/filters/subunit-stats | 41
-rwxr-xr-x lib/subunit/filters/subunit-tags | 26
-rwxr-xr-x lib/subunit/filters/subunit2gtk | 259
-rwxr-xr-x lib/subunit/filters/subunit2junitxml | 65
-rwxr-xr-x lib/subunit/filters/subunit2pyunit | 48
-rw-r--r-- lib/subunit/libcppunit_subunit.pc.in | 11
-rw-r--r-- lib/subunit/libsubunit.pc.in | 11
-rwxr-xr-x lib/subunit/perl/Makefile.PL.in | 20
-rw-r--r-- lib/subunit/perl/lib/Subunit.pm | 162
-rw-r--r-- lib/subunit/perl/lib/Subunit/Diff.pm | 85
-rwxr-xr-x lib/subunit/perl/subunit-diff | 31
-rw-r--r-- lib/subunit/python/testtools/__init__.py | 58
-rw-r--r-- lib/subunit/python/testtools/content.py | 91
-rw-r--r-- lib/subunit/python/testtools/content_type.py | 30
-rw-r--r-- lib/subunit/python/testtools/matchers.py | 282
-rwxr-xr-x lib/subunit/python/testtools/run.py | 39
-rw-r--r-- lib/subunit/python/testtools/runtest.py | 142
-rw-r--r-- lib/subunit/python/testtools/testcase.py | 468
-rw-r--r-- lib/subunit/python/testtools/testresult/__init__.py | 19
-rw-r--r-- lib/subunit/python/testtools/testresult/doubles.py | 95
-rw-r--r-- lib/subunit/python/testtools/testresult/real.py | 540
-rw-r--r-- lib/subunit/python/testtools/tests/__init__.py | 30
-rw-r--r-- lib/subunit/python/testtools/tests/helpers.py | 67
-rw-r--r-- lib/subunit/python/testtools/tests/test_content.py | 72
-rw-r--r-- lib/subunit/python/testtools/tests/test_content_type.py | 34
-rw-r--r-- lib/subunit/python/testtools/tests/test_matchers.py | 171
-rw-r--r-- lib/subunit/python/testtools/tests/test_runtest.py | 185
-rw-r--r-- lib/subunit/python/testtools/tests/test_testresult.py | 807
-rw-r--r-- lib/subunit/python/testtools/tests/test_testsuite.py | 56
-rw-r--r-- lib/subunit/python/testtools/tests/test_testtools.py | 755
-rw-r--r-- lib/subunit/python/testtools/testsuite.py | 74
-rw-r--r-- lib/subunit/python/testtools/utils.py | 39
-rwxr-xr-x lib/subunit/runtests.py | 138
-rw-r--r-- lib/subunit/shell/README | 62
-rw-r--r-- lib/subunit/shell/share/subunit.sh | 56
-rwxr-xr-x lib/subunit/shell/tests/test_function_output.sh | 97
-rwxr-xr-x lib/subunit/shell/tests/test_source_library.sh | 108
-rwxr-xr-x lib/subunit/update.sh | 16
56 files changed, 2958 insertions, 4076 deletions
diff --git a/lib/subunit/Apache-2.0 b/lib/subunit/Apache-2.0
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/lib/subunit/Apache-2.0
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/lib/subunit/BSD b/lib/subunit/BSD
new file mode 100644
index 0000000000..fa130cd529
--- /dev/null
+++ b/lib/subunit/BSD
@@ -0,0 +1,26 @@
+Copyright (c) Robert Collins and Subunit contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+3. Neither the name of Robert Collins nor the names of Subunit contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS''
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGE.
diff --git a/lib/subunit/COPYING b/lib/subunit/COPYING
new file mode 100644
index 0000000000..3ba50f8e08
--- /dev/null
+++ b/lib/subunit/COPYING
@@ -0,0 +1,36 @@
+Subunit is licensed under two licenses, the Apache License, Version 2.0 or
+the 3-clause BSD License. You may use this project under either of these
+licenses - choose the one that works best for you.
+
+We require contributions to be licensed under both licenses. The primary
+difference between them is that the Apache license takes care of potential
+issues with patents and other intellectual property concerns. This is
+important to Subunit as Subunit wants to be license compatible in a very
+broad manner to allow reuse and incorporation into other projects.
+
+Generally every source file in Subunit needs a license grant under both these
+licenses. As the code is shipped as a single unit, a brief form is used:
+----
+Copyright (c) [yyyy][,yyyy]* [name or 'Subunit Contributors']
+
+Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+license at the users choice. A copy of both licenses are available in the
+project source as Apache-2.0 and BSD. You may not use this file except in
+compliance with one of these two licences.
+
+Unless required by applicable law or agreed to in writing, software
+distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+license you chose for the specific language governing permissions and
+limitations under that license.
+----
+
+Code that has been incorporated into Subunit from other projects will
+naturally be under its own license, and will retain that license.
+
+A known list of such code is maintained here:
+* The python/iso8601 module by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+* The runtests.py and python/subunit/tests/TestUtil.py modules are GPL test
+  support modules. They are not installed by Subunit - they are only ever
+  used on the build machine. Copyright 2004 Canonical Limited.
diff --git a/lib/subunit/INSTALL b/lib/subunit/INSTALL
new file mode 100644
index 0000000000..79cf7c18d0
--- /dev/null
+++ b/lib/subunit/INSTALL
@@ -0,0 +1,25 @@
+To install subunit
+------------------
+
+Bootstrap::
+ autoreconf -vi
+Configure::
+ ./configure
+Install::
+ make install
+
+Dependencies
+------------
+
+* Python for the filters
+* 'testtools' (On Debian and Ubuntu systems the 'python-testtools' package,
+ the testtools package on pypi, or https://launchpad.net/testtools) for
+ the extended test API which permits attachments. Version 0.9.2 or newer is
+ required. Of particular note, http://testtools.python-hosting.com/ is not
+ the testtools you want.
+* A C compiler for the C bindings
+* Perl for the Perl tools (including subunit-diff)
+* Check to run the subunit test suite.
+* python-gtk2 if you wish to use subunit2gtk
+* python-junitxml if you wish to use subunit2junitxml
+* pkg-config for configure detection of supporting libraries.
diff --git a/lib/subunit/Makefile.am b/lib/subunit/Makefile.am
new file mode 100644
index 0000000000..716fa0fe21
--- /dev/null
+++ b/lib/subunit/Makefile.am
@@ -0,0 +1,136 @@
+EXTRA_DIST = \
+ .bzrignore \
+ Apache-2.0 \
+ BSD \
+ INSTALL \
+ Makefile.am \
+ NEWS \
+ README \
+ c++/README \
+ c/README \
+ c/check-subunit-0.9.3.patch \
+ c/check-subunit-0.9.5.patch \
+ c/check-subunit-0.9.6.patch \
+ perl/Makefile.PL.in \
+ perl/lib/Subunit.pm \
+ perl/lib/Subunit/Diff.pm \
+ perl/subunit-diff \
+ python/iso8601/LICENSE \
+ python/iso8601/README \
+ python/iso8601/README.subunit \
+ python/iso8601/setup.py \
+ python/iso8601/test_iso8601.py \
+ python/subunit/tests/TestUtil.py \
+ python/subunit/tests/__init__.py \
+ python/subunit/tests/sample-script.py \
+ python/subunit/tests/sample-two-script.py \
+ python/subunit/tests/test_chunked.py \
+ python/subunit/tests/test_details.py \
+ python/subunit/tests/test_progress_model.py \
+ python/subunit/tests/test_subunit_filter.py \
+ python/subunit/tests/test_subunit_stats.py \
+ python/subunit/tests/test_subunit_tags.py \
+ python/subunit/tests/test_tap2subunit.py \
+ python/subunit/tests/test_test_protocol.py \
+ python/subunit/tests/test_test_results.py \
+ runtests.py \
+ shell/README \
+ shell/share/subunit.sh \
+ shell/subunit-ui.patch \
+ shell/tests/test_function_output.sh \
+ shell/tests/test_source_library.sh
+
+ACLOCAL_AMFLAGS = -I m4
+
+include_subunitdir = $(includedir)/subunit
+
+dist_bin_SCRIPTS = \
+ filters/subunit-filter \
+ filters/subunit-ls \
+ filters/subunit-stats \
+ filters/subunit-tags \
+ filters/subunit2gtk \
+ filters/subunit2junitxml \
+ filters/subunit2pyunit \
+ filters/tap2subunit
+
+TESTS_ENVIRONMENT = SHELL_SHARE='$(top_srcdir)/shell/share/' PYTHONPATH='$(abs_top_srcdir)/python':${PYTHONPATH}
+TESTS = runtests.py $(check_PROGRAMS)
+
+## install libsubunit.pc
+pcdatadir = $(libdir)/pkgconfig
+pcdata_DATA = \
+ libsubunit.pc \
+ libcppunit_subunit.pc
+
+pkgpython_PYTHON = \
+ python/subunit/__init__.py \
+ python/subunit/chunked.py \
+ python/subunit/details.py \
+ python/subunit/iso8601.py \
+ python/subunit/progress_model.py \
+ python/subunit/run.py \
+ python/subunit/test_results.py
+
+lib_LTLIBRARIES = libsubunit.la
+lib_LTLIBRARIES += libcppunit_subunit.la
+
+include_subunit_HEADERS = \
+ c/include/subunit/child.h \
+ c++/SubunitTestProgressListener.h
+
+check_PROGRAMS = \
+ c/tests/test_child
+
+check_SCRIPTS = \
+ runtests.py
+
+libsubunit_la_SOURCES = \
+ c/lib/child.c \
+ c/include/subunit/child.h
+
+libcppunit_subunit_la_SOURCES = \
+ c++/SubunitTestProgressListener.cpp \
+ c++/SubunitTestProgressListener.h
+
+tests_LDADD = @CHECK_LIBS@ $(top_builddir)/libsubunit.la
+c_tests_test_child_CFLAGS = -I$(top_srcdir)/c/include $(SUBUNIT_CFLAGS) @CHECK_CFLAGS@
+c_tests_test_child_LDADD = $(tests_LDADD)
+
+
+all-local: perl/Makefile
+ $(MAKE) -C perl all
+
+check-local: perl/Makefile
+ $(MAKE) -C perl check
+
+clean-local:
+ find . -type f -name "*.pyc" -exec rm {} ';'
+ rm -f perl/Makefile
+
+# Remove perl dir for VPATH builds.
+distclean-local:
+	-rm perl/Makefile.PL > /dev/null
+	-rmdir perl > /dev/null
+
+install-exec-local: perl/Makefile
+ $(MAKE) -C perl install
+
+mostlyclean-local:
+ rm -rf perl/blib
+ rm -rf perl/pm_to_blib
+
+# 'uninstall' perl files during distcheck
+uninstall-local:
+ if [ "_inst" = `basename ${prefix}` ]; then \
+ $(MAKE) -C perl uninstall_distcheck; \
+ rm -f "$(DESTDIR)$(bindir)"/subunit-diff; \
+ fi
+
+# The default for MakeMaker; can be overridden by exporting
+INSTALLDIRS ?= site
+
+perl/Makefile: perl/Makefile.PL
+ mkdir -p perl
+ cd perl && perl Makefile.PL INSTALLDIRS=${INSTALLDIRS}
+ -rm perl/Makefile.old > /dev/null
diff --git a/lib/subunit/NEWS b/lib/subunit/NEWS
new file mode 100644
index 0000000000..7c933c8f6e
--- /dev/null
+++ b/lib/subunit/NEWS
@@ -0,0 +1,174 @@
+---------------------
+subunit release notes
+---------------------
+
+NEXT (In development)
+---------------------
+
+BUG FIXES
+~~~~~~~~~
+
+* Fix incorrect reference to subunit_test_failf in c/README.
+ (Brad Hards, #524341)
+
+* Fix incorrect ordering of tags method parameters in TestResultDecorator. This
+ is purely cosmetic as the parameters are passed down with no interpretation.
+ (Robert Collins, #537611)
+
+0.0.5
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* make check was failing if subunit wasn't installed due to a missing include
+ path for the test program test_child.
+
+* make distcheck was failing due to a missing $(top_srcdir) rune.
+
+IMPROVEMENTS
+~~~~~~~~~~~~
+
+* New filter `subunit-notify` that will show a notification window with test
+ statistics when the test run finishes.
+
+* subunit.run will now pipe its output to the command in the
+ SUBUNIT_FORMATTER environment variable, if set.
+
+0.0.4
+-----
+
+BUG FIXES
+~~~~~~~~~
+
+* subunit2junitxml -f required a value; this is now fixed and -f acts as a
+  boolean switch with no parameter.
+
+* Building with autoconf 2.65 is now supported.
+
+
+0.0.3
+-----
+
+ CHANGES:
+
+ * License change, by unanimous agreement of contributors to BSD/Apache
+ License Version 2.0. This makes Subunit compatible with more testing
+ frameworks.
+
+ IMPROVEMENTS:
+
+ * CPPUnit is now directly supported: subunit builds a cppunit listener
+ ``libcppunit-subunit``.
+
+ * In the python API ``addExpectedFailure`` and ``addUnexpectedSuccess``
+ from python 2.7/3.1 are now supported. ``addExpectedFailure`` is
+ serialised as ``xfail``, and ``addUnexpectedSuccess`` as ``success``.
+ The ``ProtocolTestCase`` parser now calls outcomes using an extended
+    API that permits attaching arbitrary MIME resources such as text files,
+    log entries, and so on. This extended API is being developed with the
+ Python testing community, and is in flux. ``TestResult`` objects that
+ do not support the API will be detected and transparently downgraded
+ back to the regular Python unittest API.
+
+  * INSTALLDIRS can be set to control the perl MakeMaker 'INSTALLDIRS'
+    variable when installing.
+
+  * Multipart test outcomes are tentatively supported; the exact protocol
+    for them, both serialisation and object API, is not yet finalised.
+    Testers and early adopters are sought. As part of this, and also to
+    provide a more precise focus on the wire protocol and toolchain,
+    Subunit now depends on testtools (http://launchpad.net/testtools)
+    release 0.9.0 or newer.
+
+  * subunit2junitxml supports a new option, --forward, which causes it
+    to forward the raw subunit stream in a similar manner to tee. This
+    is used with the -o option to both write an XML report and have some
+    other subunit filter process the stream.
+
+ * The C library now has ``subunit_test_skip``.
+
+ BUG FIXES:
+
+ * Install progress_model.py correctly.
+
+ * Non-gcc builds will no longer try to use gcc specific flags.
+ (Thanks trondn-norbye)
+
+ API CHANGES:
+
+ INTERNALS:
+
+0.0.2
+-----
+
+ CHANGES:
+
+ IMPROVEMENTS:
+
+ * A number of filters now support ``--no-passthrough`` to cause all
+ non-subunit content to be discarded. This is useful when precise control
+ over what is output is required - such as with subunit2junitxml.
+
+ * A small perl parser is now included, and a new ``subunit-diff`` tool
+ using that is included. (Jelmer Vernooij)
+
+ * Subunit streams can now include optional, incremental lookahead
+ information about progress. This allows reporters to make estimates
+ about completion, when such information is available. See the README
+ under ``progress`` for more details.
+
+  * ``subunit-filter`` now supports regex filtering via ``--with`` and
+    ``--without`` options. (Martin Pool)
+
+ * ``subunit2gtk`` has been added, a filter that shows a GTK summary of a
+ test stream.
+
+ * ``subunit2pyunit`` has a --progress flag which will cause the bzrlib
+ test reporter to be used, which has a textual progress bar. This requires
+ a recent bzrlib as a minor bugfix was required in bzrlib to support this.
+
+ * ``subunit2junitxml`` has been added. This filter converts a subunit
+ stream to a single JUnit style XML stream using the pyjunitxml
+ python library.
+
+ * The shell functions support skipping via ``subunit_skip_test`` now.
+
+ BUG FIXES:
+
+  * ``xfail`` outcomes are now passed to python ``TestResult`` objects via
+    addExpectedFailure if it is present on the TestResult. Python 2.6 and
+ earlier which do not have this function will have ``xfail`` outcomes
+ passed through as success outcomes as earlier versions of subunit did.
+
+ API CHANGES:
+
+  * tags are no longer passed around in python via the ``TestCase.tags``
+    attribute. Instead ``TestResult.tags(new_tags, gone_tags)`` is called;
+    as in the protocol, a call made while a test is active applies only
+    to that test. (Robert Collins)
+
+ * ``TestResultFilter`` takes a new optional constructor parameter
+ ``filter_predicate``. (Martin Pool)
+
+ * When a progress: directive is encountered in a subunit stream, the
+ python bindings now call the ``progress(offset, whence)`` method on
+ ``TestResult``.
+
+ * When a time: directive is encountered in a subunit stream, the python
+ bindings now call the ``time(seconds)`` method on ``TestResult``.
+
+ INTERNALS:
+
+ * (python) Added ``subunit.test_results.AutoTimingTestResultDecorator``. Most
+ users of subunit will want to wrap their ``TestProtocolClient`` objects
+ in this decorator to get test timing data for performance analysis.
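+
+    A minimal sketch of that wrapping (module paths as given above)::
+
+        import sys
+        from subunit import TestProtocolClient
+        from subunit.test_results import AutoTimingTestResultDecorator
+
+        # Each event reported through the decorator is preceded by a
+        # time: directive, giving consumers per-test timing data.
+        result = AutoTimingTestResultDecorator(TestProtocolClient(sys.stdout))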
+
+ * (python) ExecTestCase supports passing arguments to test scripts.
+
+ * (python) New helper ``subunit.test_results.HookedTestResultDecorator``
+ which can be used to call some code on every event, without having to
+ implement all the event methods.
+
+ * (python) ``TestProtocolClient.time(a_datetime)`` has been added which
+ causes a timestamp to be output to the stream.
diff --git a/lib/subunit/README b/lib/subunit/README
index c657992c7a..9740d013a5 100644
--- a/lib/subunit/README
+++ b/lib/subunit/README
@@ -1,7 +1,212 @@
-This directory contains some helper code for the Subunit protocol. It is
-a partial import of the code from the upstream subunit project, which can
-be found at https://launchpad.net/subunit.
-To update the snapshot, run update.sh in this directory. When making changes
-here, please also submit them upstream - otherwise they'll be gone by the
-next time we import subunit.
+ subunit: A streaming protocol for test results
+ Copyright (C) 2005-2009 Robert Collins <robertc@robertcollins.net>
+
+ Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ license at the users choice. A copy of both licenses are available in the
+ project source as Apache-2.0 and BSD. You may not use this file except in
+ compliance with one of these two licences.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ license you chose for the specific language governing permissions and
+ limitations under that license.
+
+ See the COPYING file for full details on the licensing of Subunit.
+
+ subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
+ licence - see python/iso8601/LICENSE for details.
+
+Subunit
+-------
+
+Subunit is a streaming protocol for test results. The protocol is human
+readable and easily generated and parsed. By design all the components of
+the protocol conceptually fit into the xUnit TestCase->TestResult interaction.
+
+Subunit comes with command line filters to process a subunit stream and
+language bindings for python, C, C++ and shell. Bindings are easy to write
+for other languages.
+
+A number of useful things can be done easily with subunit:
+ * Test aggregation: Tests run separately can be combined and then
+ reported/displayed together. For instance, tests from different languages
+ can be shown as a seamless whole.
+ * Test archiving: A test run may be recorded and replayed later.
+ * Test isolation: Tests that may crash or otherwise interact badly with each
+   other can be run separately and then aggregated, rather than interfering
+   with each other.
+ * Grid testing: subunit can act as the necessary serialisation and
+   deserialisation to get test runs on distributed machines reported in
+   real time.
+
+Subunit supplies the following filters:
+ * tap2subunit - convert Perl's Test Anything Protocol (TAP) to subunit.
+ * subunit2pyunit - convert a subunit stream to pyunit test results.
+ * subunit2gtk - show a subunit stream in GTK.
+ * subunit2junitxml - convert a subunit stream to JUnit's XML format.
+ * subunit-diff - compare two subunit streams.
+ * subunit-filter - filter out tests from a subunit stream.
+ * subunit-ls - list info about tests present in a subunit stream.
+ * subunit-notify - show a notification window with test statistics when the
+   test run finishes.
+ * subunit-stats - generate a summary of a subunit stream.
+ * subunit-tags - add or remove tags from a stream.
+
+Integration with other tools
+----------------------------
+
+Subunit's language bindings act as integrations with various test runners
+such as 'check', 'cppunit' and Python's 'unittest'. Beyond that, a small
+amount of glue (typically a few lines) will allow Subunit to be used in more
+sophisticated ways.
+
+Python
+======
+
+Subunit has excellent Python support: most of the filters and tools are written
+in python and there are facilities for using Subunit to increase test isolation
+seamlessly within a test suite.
+
+One simple way to run an existing python test suite and have it output subunit
+is the module ``subunit.run``::
+
+ $ python -m subunit.run mypackage.tests.test_suite
+
+For more information on the Python support Subunit offers, please see
+``pydoc subunit``, or the source in ``python/subunit/__init__.py``.
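+
+A minimal sketch of emitting subunit directly from Python code, using the
+``TestProtocolClient`` result class from these bindings (``TestFoo`` is a
+hypothetical example test case)::
+
+    import sys
+    import unittest
+    from subunit import TestProtocolClient
+
+    class TestFoo(unittest.TestCase):
+        def test_works(self):
+            pass
+
+    # TestProtocolClient is a TestResult that serialises each test event
+    # to the given stream as subunit.
+    suite = unittest.TestLoader().loadTestsFromTestCase(TestFoo)
+    suite.run(TestProtocolClient(sys.stdout))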
+
+C
+=
+
+Subunit has C bindings to emit the protocol, and comes with a patch for 'check'
+which has been nominally accepted by the 'check' developers. See 'c/README' for
+more details.
+
+C++
+===
+
+The C library is includable and usable directly from C++. A TestListener for
+CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
+
+shell
+=====
+
+Similar to C, the shell bindings consist of simple functions to output protocol
+elements, and a patch for adding subunit output to the 'ShUnit' shell test
+runner. See 'shell/README' for details.
+
+Filter recipes
+--------------
+
+To ignore some failing tests whose root cause is already known::
+
+ subunit-filter --without 'AttributeError.*flavor'
+
+
+The protocol
+------------
+
+Sample subunit wire contents
+----------------------------
+
+The following::
+ test: test foo works
+ success: test foo works.
+ test: tar a file.
+ failure: tar a file. [
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+ ]
+ a writeln to stdout
+
+When run through subunit2pyunit::
+ .F
+ a writeln to stdout
+
+ ========================
+ FAILURE: tar a file.
+ -------------------
+ ..
+ ].. space is eaten.
+ foo.c:34 WARNING foo is not defined.
+
+
+Subunit protocol description
+============================
+
+This description is being ported to an EBNF style. Currently it is only partly
+in that style, but should be fairly clear all the same. When in doubt, refer
+to the source (and ideally help fix up the description!). Generally the
+protocol is line oriented and consists of directives and their parameters,
+or - outside a DETAILS region - unexpected lines which are not interpreted
+by the parser and should be forwarded unaltered.
+
+test|testing|test:|testing: test label
+success|success:|successful|successful: test label
+success|success:|successful|successful: test label DETAILS
+failure: test label
+failure: test label DETAILS
+error: test label
+error: test label DETAILS
+skip[:] test label
+skip[:] test label DETAILS
+xfail[:] test label
+xfail[:] test label DETAILS
+progress: [+|-]X
+progress: push
+progress: pop
+tags: [-]TAG ...
+time: YYYY-MM-DD HH:MM:SSZ
+
+DETAILS ::= BRACKETED | MULTIPART
+BRACKETED ::= '[' CR lines ']' CR
+MULTIPART ::= '[ multipart' CR PART* ']' CR
+PART ::= PART_TYPE CR NAME CR PART_BYTES CR
+PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
+PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
+
+unexpected output on stdout -> stdout.
+exit w/0 or last test completing -> error
+
+Tags given outside a test are applied to all following tests.
+Tags given after a test: line and before the result line for the same test
+apply only to that test, and inherit the current global tags.
+A '-' before a tag is used to remove tags - e.g. to prevent a global tag
+applying to a single test, or to cancel a global tag.
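+
+For example, in the following (hypothetical) stream the first test carries
+the global tag ``linux``; the second carries ``quick`` and has ``linux``
+removed for that test only::
+
+    tags: linux
+    test: test one
+    success: test one
+    test: test two
+    tags: quick -linux
+    success: test two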
+
+The progress directive is used to provide progress information about a stream
+so that stream consumers can provide completion estimates, progress bars and
+so on. Stream generators that know how many tests will be present in the
+stream should output "progress: COUNT". Stream filters that add tests should
+output "progress: +COUNT", and those that remove tests should output
+"progress: -COUNT". An absolute count should reset the progress indicators in
+use - it indicates that two separate streams from different generators have
+been trivially concatenated together, and there is no knowledge of how many
+more complete streams are incoming. Smart concatenation could scan each stream
+for their count and sum them, or alternatively translate absolute counts into
+relative counts inline. It is recommended that outputters avoid absolute
+counts unless necessary.
+
+The push and pop directives are used to provide local regions for progress
+reporting. This fits with hierarchically operating test environments - such
+as those that organise tests into suites - the top-most runner can report on
+the number of suites, and each suite surrounds its output with a (push, pop)
+pair. Interpreters should interpret a pop as also advancing the progress of
+the restored level by one step.
+
+Encountering progress directives between the start and end of a test pair
+indicates that a previous test was interrupted and did not cleanly terminate:
+it should be implicitly closed with an error (the same as when a stream ends
+with no closing test directive for the most recently started test).
+
+The time directive acts as a clock event - it sets the time for all future
+events. The value should be a valid ISO8601 time.
+
+The skip result is used to indicate a test that was found by the runner but
+not fully executed due to some policy or dependency issue. This is represented
+in python using the addSkip interface that testtools
+(https://edge.launchpad.net/testtools) defines. When communicating with a
+non-skip-aware test result, the test is reported as an error.
+
+The xfail result is used to indicate a test that was expected to fail, and
+which failed in the expected manner. As this is a normal condition for such
+tests it is represented as a successful test in Python.
+
+In future, skip and xfail results will be represented semantically in Python,
+but some discussion is underway on the right way to do this.
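+
+Parsing a stream in Python is similarly small. A minimal sketch using the
+``ProtocolTestCase`` adapter from these bindings, which replays a subunit
+stream into any standard ``TestResult``::
+
+    import sys
+    import unittest
+    from subunit import ProtocolTestCase
+
+    # Read a subunit stream from stdin and replay it as test events.
+    result = unittest.TestResult()
+    ProtocolTestCase(sys.stdin).run(result)
+    sys.stdout.write("Ran %d tests, %d failures, %d errors\n" % (
+        result.testsRun, len(result.failures), len(result.errors)))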
diff --git a/lib/subunit/c++/README b/lib/subunit/c++/README
new file mode 100644
index 0000000000..7b8184400e
--- /dev/null
+++ b/lib/subunit/c++/README
@@ -0,0 +1,50 @@
+#
+# subunit C++ bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+Currently there are no native C++ bindings for subunit. However the C library
+can be used from C++ safely. A CPPUnit listener is built as part of Subunit to
+allow CPPUnit users to simply get Subunit output.
+
+To use the listener, use pkg-config (or your preferred replacement) to get the
+cflags and link settings from libcppunit_subunit.pc.
+
+In your test driver main, use SubunitTestProgressListener, as shown in this
+example main::
+
+ {
+ // Create the event manager and test controller
+ CPPUNIT_NS::TestResult controller;
+
+ // Add a listener that collects test result
+ // so we can get the overall status.
+ // note this isn't needed for subunit...
+ CPPUNIT_NS::TestResultCollector result;
+ controller.addListener( &result );
+
+ // Add a listener that prints test activity in subunit format.
+ CPPUNIT_NS::SubunitTestProgressListener progress;
+ controller.addListener( &progress );
+
+ // Add the top suite to the test runner
+ CPPUNIT_NS::TestRunner runner;
+ runner.addTest( CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest() );
+ runner.run( controller );
+
+ return result.wasSuccessful() ? 0 : 1;
+ }
diff --git a/lib/subunit/c++/SubunitTestProgressListener.cpp b/lib/subunit/c++/SubunitTestProgressListener.cpp
new file mode 100644
index 0000000000..76cd9e1194
--- /dev/null
+++ b/lib/subunit/c++/SubunitTestProgressListener.cpp
@@ -0,0 +1,63 @@
+/* Subunit test listener for cppunit (http://cppunit.sourceforge.net).
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ */
+
+#include <cppunit/Exception.h>
+#include <cppunit/Test.h>
+#include <cppunit/TestFailure.h>
+#include <cppunit/TextOutputter.h>
+#include <iostream>
+
+// Have to be able to import the public interface without config.h.
+#include "SubunitTestProgressListener.h"
+#include "config.h"
+#include "subunit/child.h"
+
+
+CPPUNIT_NS_BEGIN
+
+
+void
+SubunitTestProgressListener::startTest( Test *test )
+{
+ subunit_test_start(test->getName().c_str());
+ last_test_failed = false;
+}
+
+void
+SubunitTestProgressListener::addFailure( const TestFailure &failure )
+{
+ std::ostringstream capture_stream;
+ TextOutputter outputter(NULL, capture_stream);
+ outputter.printFailureLocation(failure.sourceLine());
+ outputter.printFailureDetail(failure.thrownException());
+
+ if (failure.isError())
+ subunit_test_error(failure.failedTestName().c_str(),
+ capture_stream.str().c_str());
+ else
+ subunit_test_fail(failure.failedTestName().c_str(),
+ capture_stream.str().c_str());
+ last_test_failed = true;
+}
+
+void
+SubunitTestProgressListener::endTest( Test *test)
+{
+ if (!last_test_failed)
+ subunit_test_pass(test->getName().c_str());
+}
+
+
+CPPUNIT_NS_END
diff --git a/lib/subunit/c++/SubunitTestProgressListener.h b/lib/subunit/c++/SubunitTestProgressListener.h
new file mode 100644
index 0000000000..5206d833c7
--- /dev/null
+++ b/lib/subunit/c++/SubunitTestProgressListener.h
@@ -0,0 +1,56 @@
+/* Subunit test listener for cppunit (http://cppunit.sourceforge.net).
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ */
+#ifndef CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+#define CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+
+#include <cppunit/TestListener.h>
+
+
+CPPUNIT_NS_BEGIN
+
+
+/*!
+ * \brief TestListener that outputs subunit
+ * (http://www.robertcollins.net/unittest/subunit) compatible output.
+ * \ingroup TrackingTestExecution
+ */
+class CPPUNIT_API SubunitTestProgressListener : public TestListener
+{
+public:
+
+ SubunitTestProgressListener() {}
+
+ void startTest( Test *test );
+
+ void addFailure( const TestFailure &failure );
+
+ void endTest( Test *test );
+
+private:
+ /// Prevents the use of the copy constructor.
+ SubunitTestProgressListener( const SubunitTestProgressListener &copy );
+
+ /// Prevents the use of the copy operator.
+ void operator =( const SubunitTestProgressListener &copy );
+
+private:
+ int last_test_failed;
+};
+
+
+CPPUNIT_NS_END
+
+#endif // CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
+
diff --git a/lib/subunit/c/README b/lib/subunit/c/README
new file mode 100644
index 0000000000..b62fd45395
--- /dev/null
+++ b/lib/subunit/c/README
@@ -0,0 +1,68 @@
+#
+# subunit C bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+This subtree contains an implementation of the subunit child protocol.
+Currently I have no plans to write a test runner in C, so I have not written
+an implementation of the parent protocol [but will happily accept patches].
+This implementation is built by the project's autotools build and tested via
+'check'. See the tests/ directory for the test programs; `make check` will
+run them.
+
+The child protocol bindings consist of five functions which you can use to
+output test metadata trivially. See c/include/subunit/child.h and
+c/lib/child.c for details.
+
+However, this is not a test runner - subunit provides no support for [for
+instance] managing assertions, cleaning up on errors etc. You can look at
+'check' (http://check.sourceforge.net/) or
+'gunit' (https://garage.maemo.org/projects/gunit) for C unit test
+frameworks.
+There is a patch for 'check' (check-subunit-*.patch) in this source tree.
+It's also available as request ID #1470750 in the sourceforge request tracker
+http://sourceforge.net/tracker/index.php. The 'check' developers have
+indicated they will merge this during the current release cycle.
+
+If you are a test environment maintainer - either homegrown, or 'check' or
+'gunit' or some other - you will want to know how the subunit calls should be
+used. Here is what a manually written test using the bindings might look like:
+
+
+void
+a_test(void) {
+    int result;
+    char message[100];
+    subunit_test_start("test name");
+    /* determine if test passes or fails */
+    result = SOME_VALUE;
+    if (!result) {
+        subunit_test_pass("test name");
+    } else {
+        /* subunit_test_fail takes a preformatted string, not printf-style
+         * arguments, so format the error message first. */
+        snprintf(message, sizeof(message),
+                 "Something went wrong running something:\n"
+                 "exited with result: '%d'", result);
+        subunit_test_fail("test name", message);
+    }
+}
+
+Which when run with a subunit test runner will generate something like:
+test name ... ok
+
+on success, and:
+
+test name ... FAIL
+
+======================================================================
+FAIL: test name
+----------------------------------------------------------------------
+RemoteError:
+Something went wrong running something:
+exited with result: '1'
diff --git a/lib/subunit/c/include/subunit/child.h b/lib/subunit/c/include/subunit/child.h
new file mode 100644
index 0000000000..0a4e60127b
--- /dev/null
+++ b/lib/subunit/c/include/subunit/child.h
@@ -0,0 +1,79 @@
+/**
+ *
+ * subunit C bindings.
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ **/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * subunit_test_start:
+ *
+ * Report that a test is starting.
+ * @name: test case name
+ */
+extern void subunit_test_start(char const * const name);
+
+
+/**
+ * subunit_test_pass:
+ *
+ * Report that a test has passed.
+ *
+ * @name: test case name
+ */
+extern void subunit_test_pass(char const * const name);
+
+
+/**
+ * subunit_test_fail:
+ *
+ * Report that a test has failed.
+ * @name: test case name
+ * @error: a string describing the error.
+ */
+extern void subunit_test_fail(char const * const name, char const * const error);
+
+
+/**
+ * subunit_test_error:
+ *
+ * Report that a test has errored. An error is an unintentional failure - i.e.
+ * a segfault rather than a failed assertion.
+ * @name: test case name
+ * @error: a string describing the error.
+ */
+extern void subunit_test_error(char const * const name,
+ char const * const error);
+
+
+/**
+ * subunit_test_skip:
+ *
+ * Report that a test has been skipped. A skip is a test that has not run to
+ * conclusion but hasn't given an error either - its result is unknown.
+ * @name: test case name
+ * @reason: a string describing the reason for the skip.
+ */
+extern void subunit_test_skip(char const * const name,
+ char const * const reason);
+
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/lib/subunit/c/lib/child.c b/lib/subunit/c/lib/child.c
new file mode 100644
index 0000000000..2b59747c0e
--- /dev/null
+++ b/lib/subunit/c/lib/child.c
@@ -0,0 +1,82 @@
+/**
+ *
+ * subunit C child-side bindings: report on tests being run.
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ **/
+
+#include <stdio.h>
+#include <string.h>
+#include "subunit/child.h"
+
+/* Write details about a test event. It is the caller's responsibility to ensure
+ * that details are only provided for events the protocol expects details on.
+ * @event: The event - e.g. 'skip'
+ * @name: The test name/id.
+ * @details: The details of the event, may be NULL if no details are present.
+ */
+static void
+subunit_send_event(char const * const event, char const * const name,
+ char const * const details)
+{
+ if (NULL == details) {
+ fprintf(stdout, "%s: %s\n", event, name);
+ } else {
+ fprintf(stdout, "%s: %s [\n", event, name);
+ fprintf(stdout, "%s", details);
+ if (details[strlen(details) - 1] != '\n')
+ fprintf(stdout, "\n");
+ fprintf(stdout, "]\n");
+ }
+ fflush(stdout);
+}
+
+/* these functions all flush to ensure that the test runner knows the action
+ * that has been taken even if the subsequent test etc. takes a long time or
+ * never completes (e.g. a segfault).
+ */
+
+void
+subunit_test_start(char const * const name)
+{
+ subunit_send_event("test", name, NULL);
+}
+
+
+void
+subunit_test_pass(char const * const name)
+{
+ /* TODO: add success details as an option */
+ subunit_send_event("success", name, NULL);
+}
+
+
+void
+subunit_test_fail(char const * const name, char const * const error)
+{
+ subunit_send_event("failure", name, error);
+}
+
+
+void
+subunit_test_error(char const * const name, char const * const error)
+{
+ subunit_send_event("error", name, error);
+}
+
+
+void
+subunit_test_skip(char const * const name, char const * const reason)
+{
+ subunit_send_event("skip", name, reason);
+}
diff --git a/lib/subunit/c/tests/test_child.c b/lib/subunit/c/tests/test_child.c
new file mode 100644
index 0000000000..6399eeb645
--- /dev/null
+++ b/lib/subunit/c/tests/test_child.c
@@ -0,0 +1,192 @@
+/**
+ *
+ * subunit C bindings.
+ * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+ *
+ * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+ * license at the users choice. A copy of both licenses are available in the
+ * project source as Apache-2.0 and BSD. You may not use this file except in
+ * compliance with one of these two licences.
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under these licenses is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the license you chose for the specific language governing permissions
+ * and limitations under that license.
+ **/
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <check.h>
+
+#include "subunit/child.h"
+
+/**
+ * Helper function to capture stdout, run some call, and check what
+ * was written.
+ * @expected the expected stdout content
+ * @function the function to call.
+ **/
+static void
+test_stdout_function(char const * expected,
+ void (*function)(void))
+{
+	/* capture stdout while running the function, then check the output. */
+ int bytecount;
+ int old_stdout;
+ int new_stdout[2];
+ char buffer[100];
+	/* we need a pipe to capture stdout in */
+	fail_if(pipe(new_stdout), "Failed to create a pipe.");
+ /* backup stdout so we can replace it */
+ old_stdout = dup(1);
+ if (old_stdout == -1) {
+ close(new_stdout[0]);
+ close(new_stdout[1]);
+ fail("Failed to backup stdout before replacing.");
+ }
+ /* redirect stdout so we can analyse it */
+ if (dup2(new_stdout[1], 1) != 1) {
+ close(old_stdout);
+ close(new_stdout[0]);
+ close(new_stdout[1]);
+ fail("Failed to redirect stdout");
+ }
+	/* yes this can block. It's a test case with < 100 bytes of output.
+ * DEAL.
+ */
+ function();
+ /* restore stdout now */
+ if (dup2(old_stdout, 1) != 1) {
+ close(old_stdout);
+ close(new_stdout[0]);
+ close(new_stdout[1]);
+ fail("Failed to restore stdout");
+ }
+	/* and we don't need the write side any more */
+ if (close(new_stdout[1])) {
+ close(new_stdout[0]);
+		fail("Failed to close write side of pipe.");
+ }
+ /* get the output */
+	bytecount = read(new_stdout[0], buffer, sizeof(buffer) - 1);
+ if (0 > bytecount) {
+ close(new_stdout[0]);
+ fail("Failed to read captured output.");
+ }
+ buffer[bytecount]='\0';
+	/* and we don't need the read side any more */
+	fail_if(close(new_stdout[0]), "Failed to close read side of pipe.");
+ /* compare with expected outcome */
+ fail_if(strcmp(expected, buffer), "Did not get expected output [%s], got [%s]", expected, buffer);
+}
+
+
+static void
+call_test_start(void)
+{
+ subunit_test_start("test case");
+}
+
+
+START_TEST (test_start)
+{
+ test_stdout_function("test: test case\n", call_test_start);
+}
+END_TEST
+
+
+static void
+call_test_pass(void)
+{
+ subunit_test_pass("test case");
+}
+
+
+START_TEST (test_pass)
+{
+ test_stdout_function("success: test case\n", call_test_pass);
+}
+END_TEST
+
+
+static void
+call_test_fail(void)
+{
+ subunit_test_fail("test case", "Multiple lines\n of error\n");
+}
+
+
+START_TEST (test_fail)
+{
+ test_stdout_function("failure: test case [\n"
+ "Multiple lines\n"
+ " of error\n"
+ "]\n",
+ call_test_fail);
+}
+END_TEST
+
+
+static void
+call_test_error(void)
+{
+ subunit_test_error("test case", "Multiple lines\n of output\n");
+}
+
+
+START_TEST (test_error)
+{
+ test_stdout_function("error: test case [\n"
+ "Multiple lines\n"
+ " of output\n"
+ "]\n",
+ call_test_error);
+}
+END_TEST
+
+
+static void
+call_test_skip(void)
+{
+ subunit_test_skip("test case", "Multiple lines\n of output\n");
+}
+
+
+START_TEST (test_skip)
+{
+ test_stdout_function("skip: test case [\n"
+ "Multiple lines\n"
+ " of output\n"
+ "]\n",
+ call_test_skip);
+}
+END_TEST
+
+static Suite *
+child_suite(void)
+{
+ Suite *s = suite_create("subunit_child");
+ TCase *tc_core = tcase_create("Core");
+ suite_add_tcase (s, tc_core);
+ tcase_add_test (tc_core, test_start);
+ tcase_add_test (tc_core, test_pass);
+ tcase_add_test (tc_core, test_fail);
+ tcase_add_test (tc_core, test_error);
+ tcase_add_test (tc_core, test_skip);
+ return s;
+}
+
+
+int
+main(void)
+{
+ int nf;
+ Suite *s = child_suite();
+ SRunner *sr = srunner_create(s);
+ srunner_run_all(sr, CK_NORMAL);
+ nf = srunner_ntests_failed(sr);
+ srunner_free(sr);
+ return (nf == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
+}
diff --git a/lib/subunit/configure.ac b/lib/subunit/configure.ac
new file mode 100644
index 0000000000..496aea5719
--- /dev/null
+++ b/lib/subunit/configure.ac
@@ -0,0 +1,75 @@
+m4_define([SUBUNIT_MAJOR_VERSION], [0])
+m4_define([SUBUNIT_MINOR_VERSION], [0])
+m4_define([SUBUNIT_MICRO_VERSION], [5])
+m4_define([SUBUNIT_VERSION],
+m4_defn([SUBUNIT_MAJOR_VERSION]).m4_defn([SUBUNIT_MINOR_VERSION]).m4_defn([SUBUNIT_MICRO_VERSION]))
+AC_PREREQ([2.59])
+AC_INIT([subunit], [SUBUNIT_VERSION], [subunit-dev@lists.launchpad.net])
+AC_CONFIG_SRCDIR([c/lib/child.c])
+AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects])
+AC_CONFIG_MACRO_DIR([m4])
+[SUBUNIT_MAJOR_VERSION]=SUBUNIT_MAJOR_VERSION
+[SUBUNIT_MINOR_VERSION]=SUBUNIT_MINOR_VERSION
+[SUBUNIT_MICRO_VERSION]=SUBUNIT_MICRO_VERSION
+[SUBUNIT_VERSION]=SUBUNIT_VERSION
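+# (The bracket-quoted left-hand sides keep m4 from expanding the shell
+# variable names; the unquoted right-hand sides expand to the version
+# components m4_define'd above.)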
+AC_SUBST([SUBUNIT_MAJOR_VERSION])
+AC_SUBST([SUBUNIT_MINOR_VERSION])
+AC_SUBST([SUBUNIT_MICRO_VERSION])
+AC_SUBST([SUBUNIT_VERSION])
+AC_USE_SYSTEM_EXTENSIONS
+AC_PROG_CC
+AC_PROG_CXX
+AM_PROG_CC_C_O
+AC_PROG_INSTALL
+AC_PROG_LN_S
+AC_PROG_LIBTOOL
+AM_PATH_PYTHON
+
+AS_IF([test "$GCC" = "yes"],
+ [
+ SUBUNIT_CFLAGS="-Wall -Werror -Wextra -Wstrict-prototypes "
+ SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wmissing-prototypes -Wwrite-strings "
+ SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wno-variadic-macros "
+ SUBUNIT_CXXFLAGS="-Wall -Werror -Wextra -Wwrite-strings -Wno-variadic-macros"
+ ])
+
+AM_CFLAGS="$SUBUNIT_CFLAGS -I\$(top_srcdir)/c/include"
+AM_CXXFLAGS="$SUBUNIT_CXXFLAGS -I\$(top_srcdir)/c/include"
+AC_SUBST(AM_CFLAGS)
+AC_SUBST(AM_CXXFLAGS)
+
+# Checks for libraries.
+
+# Checks for header files.
+AC_CHECK_HEADERS([stdlib.h])
+
+# Checks for typedefs, structures, and compiler characteristics.
+AC_C_CONST
+AC_TYPE_PID_T
+AC_TYPE_SIZE_T
+AC_HEADER_TIME
+AC_STRUCT_TM
+
+AC_CHECK_SIZEOF(int, 4)
+AC_CHECK_SIZEOF(short, 2)
+AC_CHECK_SIZEOF(long, 4)
+
+# Checks for library functions.
+AC_FUNC_MALLOC
+AC_FUNC_REALLOC
+
+# C unit testing.
+PKG_CHECK_MODULES([CHECK], [check >= 0.9.4])
+# C++ unit testing.
+PKG_CHECK_MODULES([CPPUNIT], [cppunit])
+
+# Output files
+AC_CONFIG_HEADERS([config.h])
+
+AC_CONFIG_FILES([libsubunit.pc
+ libcppunit_subunit.pc
+ Makefile
+ perl/Makefile.PL
+ ])
+AC_OUTPUT
diff --git a/lib/subunit/filters/subunit-filter b/lib/subunit/filters/subunit-filter
new file mode 100755
index 0000000000..c06a03a827
--- /dev/null
+++ b/lib/subunit/filters/subunit-filter
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
+# (C) 2009 Martin Pool
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to include/exclude tests.
+
+The default is to strip successful tests.
+
+Tests can be filtered by Python regular expressions with --with and --without,
+which match both the test name and the error text (if any). The result
+contains tests which match any of the --with expressions and none of the
+--without expressions. For case-insensitive matching prepend '(?i)'.
+Remember to quote shell metacharacters.
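+
+A sketch of typical use (the runner name here is hypothetical):
+  ./testrunner | subunit-filter --without 'flaky' | subunit2pyunit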
+"""
+
+from optparse import OptionParser
+import sys
+import unittest
+import re
+
+from subunit import (
+ DiscardStream,
+ ProtocolTestCase,
+ TestProtocolClient,
+ )
+from subunit.test_results import TestResultFilter
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--error", action="store_false",
+ help="include errors", default=False, dest="error")
+parser.add_option("-e", "--no-error", action="store_true",
+ help="exclude errors", dest="error")
+parser.add_option("--failure", action="store_false",
+ help="include failures", default=False, dest="failure")
+parser.add_option("-f", "--no-failure", action="store_true",
+ help="include failures", dest="failure")
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("-s", "--success", action="store_false",
+ help="include successes", dest="success")
+parser.add_option("--no-skip", action="store_true",
+ help="exclude skips", dest="skip")
+parser.add_option("--no-success", action="store_true",
+ help="exclude successes", default=True, dest="success")
+parser.add_option("-m", "--with", type=str,
+ help="regexp to include (case-sensitive by default)",
+ action="append", dest="with_regexps")
+parser.add_option("--without", type=str,
+ help="regexp to exclude (case-sensitive by default)",
+ action="append", dest="without_regexps")
+
+(options, args) = parser.parse_args()
+
+
+def _compile_re_from_list(l):
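+ # Joining the patterns with | builds a single alternation, so one
+ # search decides membership against every listed regexp.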
+ return re.compile("|".join(l), re.MULTILINE)
+
+
+def _make_regexp_filter(with_regexps, without_regexps):
+ """Make a callback that checks tests against regexps.
+
+ with_regexps and without_regexps are each either a list of regexp strings,
+ or None.
+ """
+ with_re = with_regexps and _compile_re_from_list(with_regexps)
+ without_re = without_regexps and _compile_re_from_list(without_regexps)
+
+ def check_regexps(test, outcome, err, details):
+ """Check if this test and error match the regexp filters."""
+ test_str = str(test) + outcome + str(err) + str(details)
+ if with_re and not with_re.search(test_str):
+ return False
+ if without_re and without_re.search(test_str):
+ return False
+ return True
+ return check_regexps
+
+
+regexp_filter = _make_regexp_filter(options.with_regexps,
+ options.without_regexps)
+result = TestProtocolClient(sys.stdout)
+result = TestResultFilter(result, filter_error=options.error,
+ filter_failure=options.failure, filter_success=options.success,
+ filter_skip=options.skip,
+ filter_predicate=regexp_filter)
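+# TestResultFilter drops the unwanted outcomes; the wrapped
+# TestProtocolClient re-serialises whatever survives back onto stdout.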
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+test.run(result)
+sys.exit(0)
diff --git a/lib/subunit/filters/subunit-ls b/lib/subunit/filters/subunit-ls
new file mode 100755
index 0000000000..15ec4b01e6
--- /dev/null
+++ b/lib/subunit/filters/subunit-ls
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""List tests in a subunit stream."""
+
+from optparse import OptionParser
+import sys
+import unittest
+
+from subunit import DiscardStream, ProtocolTestCase
+
+class TestIdPrintingResult(unittest.TestResult):
+
+ def __init__(self, stream, show_times=False):
+ """Create a FilterResult object outputting to stream."""
+ unittest.TestResult.__init__(self)
+ self._stream = stream
+ self.failed_tests = 0
+ self.__time = 0
+ self.show_times = show_times
+ self._test = None
+ self._test_duration = 0
+
+ def addError(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addFailure(self, test, err):
+ self.failed_tests += 1
+ self._test = test
+
+ def addSuccess(self, test):
+ self._test = test
+
+ def reportTest(self, test, duration):
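+ # duration arrives as a datetime.timedelta derived from the stream's
+ # time: directives; flatten it to float seconds for display.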
+ if self.show_times:
+ seconds = duration.seconds
+ seconds += duration.days * 3600 * 24
+ seconds += duration.microseconds / 1000000.0
+ self._stream.write(test.id() + ' %0.3f\n' % seconds)
+ else:
+ self._stream.write(test.id() + '\n')
+
+ def startTest(self, test):
+ self._start_time = self._time()
+
+ def stopTest(self, test):
+ test_duration = self._time() - self._start_time
+ self.reportTest(self._test, test_duration)
+
+ def time(self, time):
+ self.__time = time
+
+ def _time(self):
+ return self.__time
+
+ def wasSuccessful(self):
+ "Tells whether or not this result was a success"
+ return self.failed_tests == 0
+
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--times", action="store_true",
+ help="list the time each test took (requires a timestamped stream)",
+ default=False)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+(options, args) = parser.parse_args()
+result = TestIdPrintingResult(sys.stdout, options.times)
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+test.run(result)
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-notify b/lib/subunit/filters/subunit-notify
new file mode 100755
index 0000000000..758e7fc8ff
--- /dev/null
+++ b/lib/subunit/filters/subunit-notify
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Notify the user of a finished test run."""
+
+from optparse import OptionParser
+import sys
+
+import pygtk
+pygtk.require('2.0')
+import pynotify
+
+from subunit import DiscardStream, ProtocolTestCase, TestResultStats
+
+if not pynotify.init("Subunit-notify"):
+ sys.exit(1)
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("-f", "--forward", action="store_true", default=False,
+ help="Forward subunit stream on stdout.")
+(options, args) = parser.parse_args()
+result = TestResultStats(sys.stdout)
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+if options.forward:
+ forward_stream = sys.stdout
+else:
+ forward_stream = None
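+# passthrough controls non-subunit chatter; forward, when set, copies the
+# subunit stream itself to stdout so further filters can be chained.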
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream,
+ forward=forward_stream)
+test.run(result)
+if result.failed_tests > 0:
+ summary = "Test run failed"
+else:
+ summary = "Test run successful"
+body = "Total tests: %d; Passed: %d; Failed: %d" % (
+ result.total_tests,
+ result.passed_tests,
+ result.failed_tests,
+ )
+nw = pynotify.Notification(summary, body)
+nw.show()
+
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-stats b/lib/subunit/filters/subunit-stats
new file mode 100755
index 0000000000..4734988fc2
--- /dev/null
+++ b/lib/subunit/filters/subunit-stats
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+from optparse import OptionParser
+import sys
+import unittest
+
+from subunit import DiscardStream, ProtocolTestCase, TestResultStats
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+(options, args) = parser.parse_args()
+result = TestResultStats(sys.stdout)
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+test.run(result)
+result.formatStats()
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-tags b/lib/subunit/filters/subunit-tags
new file mode 100755
index 0000000000..edbbfce480
--- /dev/null
+++ b/lib/subunit/filters/subunit-tags
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""A filter to change tags on a subunit stream.
+
+subunit-tags foo -> adds foo
+subunit-tags foo -bar -> adds foo and removes bar
+"""
+
+import sys
+
+from subunit import tag_stream
+sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
diff --git a/lib/subunit/filters/subunit2gtk b/lib/subunit/filters/subunit2gtk
new file mode 100755
index 0000000000..c2cb2de3ce
--- /dev/null
+++ b/lib/subunit/filters/subunit2gtk
@@ -0,0 +1,259 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+### The GTK progress bar __init__ function is derived from the pygtk tutorial:
+# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
+#
+# The GTK Tutorial is Copyright (C) 1997 Ian Main.
+#
+# Copyright (C) 1998-1999 Tony Gale.
+#
+# Permission is granted to make and distribute verbatim copies of this manual
+# provided the copyright notice and this permission notice are preserved on all
+# copies.
+#
+# Permission is granted to copy and distribute modified versions of this
+# document under the conditions for verbatim copying, provided that this
+# copyright notice is included exactly as in the original, and that the entire
+# resulting derived work is distributed under the terms of a permission notice
+# identical to this one.
+#
+# Permission is granted to copy and distribute translations of this document
+# into another language, under the above conditions for modified versions.
+#
+# If you are intending to incorporate this document into a published work,
+# please contact the maintainer, and we will make an effort to ensure that you
+# have the most up to date information available.
+#
+# There is no guarantee that this document lives up to its intended purpose.
+# This is simply provided as a free resource. As such, the authors and
+# maintainers of the information provided within can not make any guarantee
+# that the information is even accurate.
+
+"""Display a subunit stream in a gtk progress window."""
+
+import sys
+import unittest
+
+import pygtk
+pygtk.require('2.0')
+import gtk, gtk.gdk, gobject
+
+from subunit import (
+ PROGRESS_POP,
+ PROGRESS_PUSH,
+ PROGRESS_SET,
+ TestProtocolServer,
+ )
+from subunit.progress_model import ProgressModel
+
+
+class GTKTestResult(unittest.TestResult):
+
+ def __init__(self):
+ super(GTKTestResult, self).__init__()
+ # Instance variables (in addition to TestResult)
+ self.window = None
+ self.run_label = None
+ self.ok_label = None
+ self.not_ok_label = None
+ self.total_tests = None
+
+ self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
+ self.window.set_resizable(True)
+
+ self.window.connect("destroy", gtk.main_quit)
+ self.window.set_title("Tests...")
+ self.window.set_border_width(0)
+
+ vbox = gtk.VBox(False, 5)
+ vbox.set_border_width(10)
+ self.window.add(vbox)
+ vbox.show()
+
+ # Create a centering alignment object
+ align = gtk.Alignment(0.5, 0.5, 0, 0)
+ vbox.pack_start(align, False, False, 5)
+ align.show()
+
+ # Create the ProgressBar
+ self.pbar = gtk.ProgressBar()
+ align.add(self.pbar)
+ self.pbar.set_text("Running")
+ self.pbar.show()
+ self.progress_model = ProgressModel()
+
+ separator = gtk.HSeparator()
+ vbox.pack_start(separator, False, False, 0)
+ separator.show()
+
+ # rows, columns, homogeneous
+ table = gtk.Table(2, 3, False)
+ vbox.pack_start(table, False, True, 0)
+ table.show()
+ # Show summary details about the run. Could use an expander.
+ label = gtk.Label("Run:")
+ table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.run_label = gtk.Label("N/A")
+ table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.run_label.show()
+
+ label = gtk.Label("OK:")
+ table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.ok_label = gtk.Label("N/A")
+ table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.ok_label.show()
+
+ label = gtk.Label("Not OK:")
+ table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ label.show()
+ self.not_ok_label = gtk.Label("N/A")
+ table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
+ gtk.EXPAND | gtk.FILL, 5, 5)
+ self.not_ok_label.show()
+
+ self.window.show()
+ # For the demo.
+ self.window.set_keep_above(True)
+ self.window.present()
+
+ def stopTest(self, test):
+ super(GTKTestResult, self).stopTest(test)
+ self.progress_model.advance()
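+ # A width of 0 means no expected-test count has been announced yet,
+ # so pulse the bar rather than showing a fraction.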
+ if self.progress_model.width() == 0:
+ self.pbar.pulse()
+ else:
+ pos = self.progress_model.pos()
+ width = self.progress_model.width()
+ percentage = (pos / float(width))
+ self.pbar.set_fraction(percentage)
+
+ def stopTestRun(self):
+ try:
+ super(GTKTestResult, self).stopTestRun()
+ except AttributeError:
+ pass
+ self.pbar.set_text('Finished')
+
+ def addError(self, test, err):
+ super(GTKTestResult, self).addError(test, err)
+ self.update_counts()
+
+ def addFailure(self, test, err):
+ super(GTKTestResult, self).addFailure(test, err)
+ self.update_counts()
+
+ def addSuccess(self, test):
+ super(GTKTestResult, self).addSuccess(test)
+ self.update_counts()
+
+ def addSkip(self, test, reason):
+ # addSkip is new in Python 2.7/3.1
+ addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
+ if callable(addSkip):
+ addSkip(test, reason)
+ self.update_counts()
+
+ def addExpectedFailure(self, test, err):
+ # addExpectedFailure is new in Python 2.7/3.1
+ addExpectedFailure = getattr(super(GTKTestResult, self),
+ 'addExpectedFailure', None)
+ if callable(addExpectedFailure):
+ addExpectedFailure(test, err)
+ self.update_counts()
+
+ def addUnexpectedSuccess(self, test):
+ # addUnexpectedSuccess is new in Python 2.7/3.1
+ addUnexpectedSuccess = getattr(super(GTKTestResult, self),
+ 'addUnexpectedSuccess', None)
+ if callable(addUnexpectedSuccess):
+ addUnexpectedSuccess(test)
+ self.update_counts()
+
+ def progress(self, offset, whence):
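+ # PUSH/POP nest progress scopes; SET announces an absolute expected
+ # test count, and any other whence adjusts the count relatively.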
+ if whence == PROGRESS_PUSH:
+ self.progress_model.push()
+ elif whence == PROGRESS_POP:
+ self.progress_model.pop()
+ elif whence == PROGRESS_SET:
+ self.total_tests = offset
+ self.progress_model.set_width(offset)
+ else:
+ self.total_tests += offset
+ self.progress_model.adjust_width(offset)
+
+ def time(self, a_datetime):
+ # We don't try to estimate completion yet.
+ pass
+
+ def update_counts(self):
+ self.run_label.set_text(str(self.testsRun))
+ bad = len(self.failures + self.errors)
+ self.ok_label.set_text(str(self.testsRun - bad))
+ self.not_ok_label.set_text(str(bad))
+
+
+class GIOProtocolTestCase(object):
+
+ def __init__(self, stream, result, on_finish):
+ self.stream = stream
+ self.schedule_read()
+ self.hup_id = gobject.io_add_watch(stream, gobject.IO_HUP, self.hup)
+ self.protocol = TestProtocolServer(result)
+ self.on_finish = on_finish
+
+ def read(self, source, condition, all=False):
+ # NB: readline() here actually blocks.
+ line = source.readline()
+ if not line:
+ self.protocol.lostConnection()
+ self.on_finish()
+ return False
+ self.protocol.lineReceived(line)
+ # schedule more IO shortly - if we say we're willing to do it
+ # immediately we starve things.
+ if not all:
+ source_id = gobject.timeout_add(1, self.schedule_read)
+ return False
+ else:
+ return True
+
+ def schedule_read(self):
+ self.read_id = gobject.io_add_watch(self.stream, gobject.IO_IN, self.read)
+
+ def hup(self, source, condition):
+ while self.read(source, condition, all=True): pass
+ self.protocol.lostConnection()
+ gobject.source_remove(self.read_id)
+ self.on_finish()
+ return False
+
+
+result = GTKTestResult()
+test = GIOProtocolTestCase(sys.stdin, result, result.stopTestRun)
+gtk.main()
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit2junitxml b/lib/subunit/filters/subunit2junitxml
new file mode 100755
index 0000000000..bea795d2bd
--- /dev/null
+++ b/lib/subunit/filters/subunit2junitxml
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Filter a subunit stream to get aggregate statistics."""
+
+from optparse import OptionParser
+import sys
+import unittest
+
+from subunit import DiscardStream, ProtocolTestCase
+try:
+ from junitxml import JUnitXmlResult
+except ImportError:
+ sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
+ "http://pypi.python.org/pypi/junitxml) is required for this filter.")
+ raise
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("-o", "--output-to",
+ help="Output the XML to this path rather than stdout.")
+parser.add_option("-f", "--forward", action="store_true", default=False,
+ help="Forward subunit stream on stdout.")
+(options, args) = parser.parse_args()
+if options.output_to is None:
+ output_to = sys.stdout
+else:
+ output_to = file(options.output_to, 'wb')
+try:
+ result = JUnitXmlResult(output_to)
+ if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+ else:
+ passthrough_stream = None
+ if options.forward:
+ forward_stream = sys.stdout
+ else:
+ forward_stream = None
+ test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream,
+ forward=forward_stream)
+ result.startTestRun()
+ test.run(result)
+ result.stopTestRun()
+finally:
+ if options.output_to is not None:
+ output_to.close()
+if result.wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit2pyunit b/lib/subunit/filters/subunit2pyunit
new file mode 100755
index 0000000000..83a23d14d1
--- /dev/null
+++ b/lib/subunit/filters/subunit2pyunit
@@ -0,0 +1,48 @@
+#!/usr/bin/env python
+# subunit: extensions to python unittest to get test results from subprocesses.
+# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+"""Display a subunit stream through python's unittest test runner."""
+
+from optparse import OptionParser
+import sys
+import unittest
+
+from subunit import DiscardStream, ProtocolTestCase, TestProtocolServer
+
+parser = OptionParser(description=__doc__)
+parser.add_option("--no-passthrough", action="store_true",
+ help="Hide all non subunit input.", default=False, dest="no_passthrough")
+parser.add_option("--progress", action="store_true",
+ help="Use bzrlib's test reporter (requires bzrlib)",
+ default=False)
+(options, args) = parser.parse_args()
+if options.no_passthrough:
+ passthrough_stream = DiscardStream()
+else:
+ passthrough_stream = None
+test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
+if options.progress:
+ from bzrlib.tests import TextTestRunner
+ from bzrlib import ui
+ ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
+ runner = TextTestRunner()
+else:
+ runner = unittest.TextTestRunner(verbosity=2)
+if runner.run(test).wasSuccessful():
+ exit_code = 0
+else:
+ exit_code = 1
+sys.exit(exit_code)
diff --git a/lib/subunit/libcppunit_subunit.pc.in b/lib/subunit/libcppunit_subunit.pc.in
new file mode 100644
index 0000000000..98982c78ae
--- /dev/null
+++ b/lib/subunit/libcppunit_subunit.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: cppunit subunit listener
+Description: Subunit output listener for the CPPUnit test library.
+URL: http://launchpad.net/subunit
+Version: @VERSION@
+Libs: -L${libdir} -lsubunit
+Cflags: -I${includedir}
diff --git a/lib/subunit/libsubunit.pc.in b/lib/subunit/libsubunit.pc.in
new file mode 100644
index 0000000000..67564148e8
--- /dev/null
+++ b/lib/subunit/libsubunit.pc.in
@@ -0,0 +1,11 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: subunit
+Description: Subunit test protocol library.
+URL: http://launchpad.net/subunit
+Version: @VERSION@
+Libs: -L${libdir} -lsubunit
+Cflags: -I${includedir}
diff --git a/lib/subunit/perl/Makefile.PL.in b/lib/subunit/perl/Makefile.PL.in
new file mode 100755
index 0000000000..26e1c181f0
--- /dev/null
+++ b/lib/subunit/perl/Makefile.PL.in
@@ -0,0 +1,20 @@
+use ExtUtils::MakeMaker;
+WriteMakefile(
+ 'INSTALL_BASE' => '@prefix@',
+ 'NAME' => 'Subunit',
+ 'VERSION' => '@SUBUNIT_VERSION@',
+ 'test' => { 'TESTS' => 'tests/*.pl' },
+ 'PMLIBDIRS' => [ 'lib' ],
+ 'EXE_FILES' => [ '@abs_srcdir@/subunit-diff' ],
+);
+sub MY::postamble {
+<<'EOT';
+check: # test
+
+uninstall_distcheck:
+ rm -fr $(DESTINSTALLARCHLIB)
+
+VPATH = @srcdir@
+.PHONY: uninstall_distcheck
+EOT
+}
diff --git a/lib/subunit/perl/lib/Subunit.pm b/lib/subunit/perl/lib/Subunit.pm
new file mode 100644
index 0000000000..05206748e2
--- /dev/null
+++ b/lib/subunit/perl/lib/Subunit.pm
@@ -0,0 +1,162 @@
+# Perl module for parsing and generating the Subunit protocol
+# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+package Subunit;
+use POSIX;
+use Time::Local qw(timegm);
+
+require Exporter;
+@ISA = qw(Exporter);
+@EXPORT_OK = qw(parse_results $VERSION);
+
+use vars qw ( $VERSION );
+
+$VERSION = '0.0.2';
+
+use strict;
+
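+# Producer-side usage is just the helpers below, e.g.:
+#   Subunit::start_test("mytest");
+#   Subunit::success_test("mytest");
+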
+sub parse_results($$$)
+{
+ my ($msg_ops, $statistics, $fh) = @_;
+ my $expected_fail = 0;
+ my $unexpected_fail = 0;
+ my $unexpected_err = 0;
+ my $open_tests = [];
+
+ while(<$fh>) {
+ if (/^test: (.+)\n/) {
+ $msg_ops->control_msg($_);
+ $msg_ops->start_test($1);
+ push (@$open_tests, $1);
+ } elsif (/^time: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)Z\n/) {
+ $msg_ops->report_time(mktime($6, $5, $4, $3, $2, $1-1900));
+ } elsif (/^(success|successful|failure|fail|skip|knownfail|error|xfail): (.*?)( \[)?([ \t]*)\n/) {
+ $msg_ops->control_msg($_);
+ my $result = $1;
+ my $testname = $2;
+ my $reason = undef;
+ if ($3) {
+ $reason = "";
+ # reason may be specified in next lines
+ my $terminated = 0;
+ while(<$fh>) {
+ $msg_ops->control_msg($_);
+ if ($_ eq "]\n") { $terminated = 1; last; } else { $reason .= $_; }
+ }
+
+ unless ($terminated) {
+ $statistics->{TESTS_ERROR}++;
+ $msg_ops->end_test($testname, "error", 1, "reason ($result) interrupted");
+ return 1;
+ }
+ }
+ if ($result eq "success" or $result eq "successful") {
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $statistics->{TESTS_EXPECTED_OK}++;
+ $msg_ops->end_test($testname, $result, 0, $reason);
+ } elsif ($result eq "xfail" or $result eq "knownfail") {
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $statistics->{TESTS_EXPECTED_FAIL}++;
+ $msg_ops->end_test($testname, $result, 0, $reason);
+ $expected_fail++;
+ } elsif ($result eq "failure" or $result eq "fail") {
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $statistics->{TESTS_UNEXPECTED_FAIL}++;
+ $msg_ops->end_test($testname, $result, 1, $reason);
+ $unexpected_fail++;
+ } elsif ($result eq "skip") {
+ $statistics->{TESTS_SKIP}++;
+ my $last = pop(@$open_tests);
+ if (defined($last) and $last ne $testname) {
+ push (@$open_tests, $testname);
+ }
+ $msg_ops->end_test($testname, $result, 0, $reason);
+ } elsif ($result eq "error") {
+ $statistics->{TESTS_ERROR}++;
+ pop(@$open_tests); #FIXME: Check that popped value == $testname
+ $msg_ops->end_test($testname, $result, 1, $reason);
+ $unexpected_err++;
+ }
+ } else {
+ $msg_ops->output_msg($_);
+ }
+ }
+
+ while ($#$open_tests+1 > 0) {
+ $msg_ops->end_test(pop(@$open_tests), "error", 1,
+ "was started but never finished!");
+ $statistics->{TESTS_ERROR}++;
+ $unexpected_err++;
+ }
+
+ return 1 if $unexpected_err > 0;
+ return 1 if $unexpected_fail > 0;
+ return 0;
+}
+
+sub start_test($)
+{
+ my ($testname) = @_;
+ print "test: $testname\n";
+}
+
+sub end_test($$;$)
+{
+ my $name = shift;
+ my $result = shift;
+ my $reason = shift;
+ if ($reason) {
+ print "$result: $name [\n";
+ print "$reason";
+ print "]\n";
+ } else {
+ print "$result: $name\n";
+ }
+}
+
+sub skip_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "skip", $reason);
+}
+
+sub fail_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "fail", $reason);
+}
+
+sub success_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "success", $reason);
+}
+
+sub xfail_test($;$)
+{
+ my $name = shift;
+ my $reason = shift;
+ end_test($name, "xfail", $reason);
+}
+
+sub report_time($)
+{
+ my ($time) = @_;
+ my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = gmtime($time); # Z marks UTC
+ printf "time: %04d-%02d-%02d %02d:%02d:%02dZ\n", $year+1900, $mon+1, $mday, $hour, $min, $sec;
+}
+
+1;
diff --git a/lib/subunit/perl/lib/Subunit/Diff.pm b/lib/subunit/perl/lib/Subunit/Diff.pm
new file mode 100644
index 0000000000..e7841c3b00
--- /dev/null
+++ b/lib/subunit/perl/lib/Subunit/Diff.pm
@@ -0,0 +1,85 @@
+#!/usr/bin/perl
+# Diff two subunit streams
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+package Subunit::Diff;
+
+use strict;
+
+use Subunit qw(parse_results);
+
+sub control_msg() { }
+sub report_time($$) { }
+
+sub output_msg($$)
+{
+ my ($self, $msg) = @_;
+
+ # No output for now, perhaps later diff this as well?
+}
+
+sub start_test($$)
+{
+ my ($self, $testname) = @_;
+}
+
+sub end_test($$$$$)
+{
+ my ($self, $testname, $result, $unexpected, $reason) = @_;
+
+ $self->{$testname} = $result;
+}
+
+sub new {
+ my ($class) = @_;
+
+ my $self = {
+ };
+ bless($self, $class);
+}
+
+sub from_file($)
+{
+ my ($path) = @_;
+ my $statistics = {
+ TESTS_UNEXPECTED_OK => 0,
+ TESTS_EXPECTED_OK => 0,
+ TESTS_UNEXPECTED_FAIL => 0,
+ TESTS_EXPECTED_FAIL => 0,
+ TESTS_ERROR => 0,
+ TESTS_SKIP => 0,
+ };
+
+ my $ret = new Subunit::Diff();
+ open(IN, $path) or return;
+ parse_results($ret, $statistics, *IN);
+ close(IN);
+ return $ret;
+}
+
+sub diff($$)
+{
+ my ($old, $new) = @_;
+ my $ret = {};
+
+ foreach my $testname (keys %$old) {
+ if ($new->{$testname} ne $old->{$testname}) {
+ $ret->{$testname} = [$old->{$testname}, $new->{$testname}];
+ }
+ }
+
+ return $ret;
+}
+
+1;
diff --git a/lib/subunit/perl/subunit-diff b/lib/subunit/perl/subunit-diff
new file mode 100755
index 0000000000..581e832ae3
--- /dev/null
+++ b/lib/subunit/perl/subunit-diff
@@ -0,0 +1,31 @@
+#!/usr/bin/perl
+# Diff two subunit streams
+# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. A copy of both licenses is available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+
+use Getopt::Long;
+use strict;
+use FindBin qw($RealBin $Script);
+use lib "$RealBin/lib";
+use Subunit::Diff;
+
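+# Usage: subunit-diff OLD-STREAM-FILE NEW-STREAM-FILE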
+my $old = Subunit::Diff::from_file($ARGV[0]);
+my $new = Subunit::Diff::from_file($ARGV[1]);
+
+my $ret = Subunit::Diff::diff($old, $new);
+
+foreach my $e (sort(keys %$ret)) {
+ printf "%s: %s -> %s\n", $e, $ret->{$e}[0], $ret->{$e}[1];
+}
+
+exit 0;
diff --git a/lib/subunit/python/testtools/__init__.py b/lib/subunit/python/testtools/__init__.py
deleted file mode 100644
index 0504d661d4..0000000000
--- a/lib/subunit/python/testtools/__init__.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# Copyright (c) 2008, 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Extensions to the standard Python unittest library."""
-
-__all__ = [
- 'clone_test_with_new_id',
- 'ConcurrentTestSuite',
- 'ExtendedToOriginalDecorator',
- 'iterate_tests',
- 'MultiTestResult',
- 'TestCase',
- 'TestResult',
- 'TextTestResult',
- 'RunTest',
- 'skip',
- 'skipIf',
- 'skipUnless',
- 'ThreadsafeForwardingResult',
- ]
-
-from testtools.matchers import (
- Matcher,
- )
-from testtools.runtest import (
- RunTest,
- )
-from testtools.testcase import (
- TestCase,
- clone_test_with_new_id,
- skip,
- skipIf,
- skipUnless,
- )
-from testtools.testresult import (
- ExtendedToOriginalDecorator,
- MultiTestResult,
- TestResult,
- TextTestResult,
- ThreadsafeForwardingResult,
- )
-from testtools.testsuite import (
- ConcurrentTestSuite,
- )
-from testtools.utils import iterate_tests
-
-# same format as sys.version_info: "A tuple containing the five components of
-# the version number: major, minor, micro, releaselevel, and serial. All
-# values except releaselevel are integers; the release level is 'alpha',
-# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
-# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
-# releaselevel of 'dev' for unreleased under-development code.
-#
-# If the releaselevel is 'alpha' then the major/minor/micro components are not
-# established at this point, and setup.py will use a version of next-$(revno).
-# If the releaselevel is 'final', then the tarball will be major.minor.micro.
-# Otherwise it is major.minor.micro~$(revno).
-
-__version__ = (0, 9, 2, 'final', 0)
diff --git a/lib/subunit/python/testtools/content.py b/lib/subunit/python/testtools/content.py
deleted file mode 100644
index 353e3f0f46..0000000000
--- a/lib/subunit/python/testtools/content.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Content - a MIME-like Content object."""
-
-import codecs
-from unittest import TestResult
-
-from testtools.content_type import ContentType
-from testtools.utils import _b
-
-
-class Content(object):
- """A MIME-like Content object.
-
- Content objects can be serialised to bytes using the iter_bytes method.
- If the Content-Type is recognised by other code, they are welcome to
- look for richer contents that mere byte serialisation - for example in
- memory object graphs etc. However, such code MUST be prepared to receive
- a generic Content object that has been reconstructed from a byte stream.
-
- :ivar content_type: The content type of this Content.
- """
-
- def __init__(self, content_type, get_bytes):
- """Create a ContentType."""
- if None in (content_type, get_bytes):
- raise ValueError("None not permitted in %r, %r" % (
- content_type, get_bytes))
- self.content_type = content_type
- self._get_bytes = get_bytes
-
- def __eq__(self, other):
- return (self.content_type == other.content_type and
- ''.join(self.iter_bytes()) == ''.join(other.iter_bytes()))
-
- def iter_bytes(self):
- """Iterate over bytestrings of the serialised content."""
- return self._get_bytes()
-
- def iter_text(self):
- """Iterate over the text of the serialised content.
-
- This is only valid for text MIME types, and will use ISO-8859-1 if
- no charset parameter is present in the MIME type. (This is somewhat
- arbitrary, but consistent with RFC2617 3.7.1).
-
- :raises ValueError: If the content type is not text/\*.
- """
- if self.content_type.type != "text":
- raise ValueError("Not a text type %r" % self.content_type)
- return self._iter_text()
-
- def _iter_text(self):
- """Worker for iter_text - does the decoding."""
- encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
- try:
- # 2.5+
- decoder = codecs.getincrementaldecoder(encoding)()
- for bytes in self.iter_bytes():
- yield decoder.decode(bytes)
- final = decoder.decode(_b(''), True)
- if final:
- yield final
- except AttributeError:
- # < 2.5
- bytes = ''.join(self.iter_bytes())
- yield bytes.decode(encoding)
-
- def __repr__(self):
- return "<Content type=%r, value=%r>" % (
- self.content_type, ''.join(self.iter_bytes()))
-
-
-class TracebackContent(Content):
- """Content object for tracebacks.
-
- This adapts an exc_info tuple to the Content interface.
- text/x-traceback;language=python is used for the mime type, in order to
- provide room for other languages to format their tracebacks differently.
- """
-
- def __init__(self, err, test):
- """Create a TracebackContent for err."""
- if err is None:
- raise ValueError("err may not be None")
- content_type = ContentType('text', 'x-traceback',
- {"language": "python", "charset": "utf8"})
- self._result = TestResult()
- value = self._result._exc_info_to_string(err, test)
- super(TracebackContent, self).__init__(
- content_type, lambda: [value.encode("utf8")])
diff --git a/lib/subunit/python/testtools/content_type.py b/lib/subunit/python/testtools/content_type.py
deleted file mode 100644
index aded81b732..0000000000
--- a/lib/subunit/python/testtools/content_type.py
+++ /dev/null
@@ -1,30 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""ContentType - a MIME Content Type."""
-
-
-class ContentType(object):
- """A content type from http://www.iana.org/assignments/media-types/
-
- :ivar type: The primary type, e.g. "text" or "application"
- :ivar subtype: The subtype, e.g. "plain" or "octet-stream"
- :ivar parameters: A dict of additional parameters specific to the
- content type.
- """
-
- def __init__(self, primary_type, sub_type, parameters=None):
- """Create a ContentType."""
- if None in (primary_type, sub_type):
- raise ValueError("None not permitted in %r, %r" % (
- primary_type, sub_type))
- self.type = primary_type
- self.subtype = sub_type
- self.parameters = parameters or {}
-
- def __eq__(self, other):
- if type(other) != ContentType:
- return False
- return self.__dict__ == other.__dict__
-
- def __repr__(self):
- return "%s/%s params=%s" % (self.type, self.subtype, self.parameters)
diff --git a/lib/subunit/python/testtools/matchers.py b/lib/subunit/python/testtools/matchers.py
deleted file mode 100644
index 039c84b7c7..0000000000
--- a/lib/subunit/python/testtools/matchers.py
+++ /dev/null
@@ -1,282 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Matchers, a way to express complex assertions outside the testcase.
-
-Inspired by 'hamcrest'.
-
-Matcher provides the abstract API that all matchers need to implement.
-
-Bundled matchers are listed in __all__: a list can be obtained by running
-$ python -c 'import testtools.matchers; print testtools.matchers.__all__'
-"""
-
-__metaclass__ = type
-__all__ = [
- 'Annotate',
- 'DocTestMatches',
- 'Equals',
- 'MatchesAll',
- 'MatchesAny',
- 'NotEquals',
- 'Not',
- ]
-
-import doctest
-
-
-class Matcher:
- """A pattern matcher.
-
- A Matcher must implement match and __str__ to be used by
- testtools.TestCase.assertThat. Matcher.match(thing) returns None when
- thing is completely matched, and a Mismatch object otherwise.
-
- Matchers can be useful outside of test cases, as they are simply a
- pattern matching language expressed as objects.
-
- testtools.matchers is inspired by hamcrest, but is pythonic rather than
- a Java transcription.
- """
-
- def match(self, something):
- """Return None if this matcher matches something, a Mismatch otherwise.
- """
- raise NotImplementedError(self.match)
-
- def __str__(self):
- """Get a sensible human representation of the matcher.
-
- This should include the parameters given to the matcher and any
- state that would affect the matches operation.
- """
- raise NotImplementedError(self.__str__)
-
-
-class Mismatch:
- """An object describing a mismatch detected by a Matcher."""
-
- def describe(self):
- """Describe the mismatch.
-
- This should be either a human-readable string or castable to a string.
- """
- raise NotImplementedError(self.describe_difference)
-
-
-class DocTestMatches:
- """See if a string matches a doctest example."""
-
- def __init__(self, example, flags=0):
- """Create a DocTestMatches to match example.
-
- :param example: The example to match e.g. 'foo bar baz'
- :param flags: doctest comparison flags to match on. e.g.
- doctest.ELLIPSIS.
- """
- if not example.endswith('\n'):
- example += '\n'
- self.want = example # required variable name by doctest.
- self.flags = flags
- self._checker = doctest.OutputChecker()
-
- def __str__(self):
- if self.flags:
- flagstr = ", flags=%d" % self.flags
- else:
- flagstr = ""
- return 'DocTestMatches(%r%s)' % (self.want, flagstr)
-
- def _with_nl(self, actual):
- result = str(actual)
- if not result.endswith('\n'):
- result += '\n'
- return result
-
- def match(self, actual):
- with_nl = self._with_nl(actual)
- if self._checker.check_output(self.want, with_nl, self.flags):
- return None
- return DocTestMismatch(self, with_nl)
-
- def _describe_difference(self, with_nl):
- return self._checker.output_difference(self, with_nl, self.flags)
-
-
-class DocTestMismatch:
- """Mismatch object for DocTestMatches."""
-
- def __init__(self, matcher, with_nl):
- self.matcher = matcher
- self.with_nl = with_nl
-
- def describe(self):
- return self.matcher._describe_difference(self.with_nl)
-
-
-class Equals:
- """Matches if the items are equal."""
-
- def __init__(self, expected):
- self.expected = expected
-
- def match(self, other):
- if self.expected == other:
- return None
- return EqualsMismatch(self.expected, other)
-
- def __str__(self):
- return "Equals(%r)" % self.expected
-
-
-class EqualsMismatch:
- """Two things differed."""
-
- def __init__(self, expected, other):
- self.expected = expected
- self.other = other
-
- def describe(self):
- return "%r != %r" % (self.expected, self.other)
-
-
-class NotEquals:
- """Matches if the items are not equal.
-
- In most cases, this is equivalent to `Not(Equals(foo))`. The difference
- only matters when testing `__ne__` implementations.
- """
-
- def __init__(self, expected):
- self.expected = expected
-
- def __str__(self):
- return 'NotEquals(%r)' % (self.expected,)
-
- def match(self, other):
- if self.expected != other:
- return None
- return NotEqualsMismatch(self.expected, other)
-
-
-class NotEqualsMismatch:
- """Two things are the same."""
-
- def __init__(self, expected, other):
- self.expected = expected
- self.other = other
-
- def describe(self):
- return '%r == %r' % (self.expected, self.other)
-
-
-class MatchesAny:
- """Matches if any of the matchers it is created with match."""
-
- def __init__(self, *matchers):
- self.matchers = matchers
-
- def match(self, matchee):
- results = []
- for matcher in self.matchers:
- mismatch = matcher.match(matchee)
- if mismatch is None:
- return None
- results.append(mismatch)
- return MismatchesAll(results)
-
- def __str__(self):
- return "MatchesAny(%s)" % ', '.join([
- str(matcher) for matcher in self.matchers])
-
-
-class MatchesAll:
- """Matches if all of the matchers it is created with match."""
-
- def __init__(self, *matchers):
- self.matchers = matchers
-
- def __str__(self):
- return 'MatchesAll(%s)' % ', '.join(map(str, self.matchers))
-
- def match(self, matchee):
- results = []
- for matcher in self.matchers:
- mismatch = matcher.match(matchee)
- if mismatch is not None:
- results.append(mismatch)
- if results:
- return MismatchesAll(results)
- else:
- return None
-
-
-class MismatchesAll:
- """A mismatch with many child mismatches."""
-
- def __init__(self, mismatches):
- self.mismatches = mismatches
-
- def describe(self):
- descriptions = ["Differences: ["]
- for mismatch in self.mismatches:
- descriptions.append(mismatch.describe())
- descriptions.append("]\n")
- return '\n'.join(descriptions)
-
-
-class Not:
- """Inverts a matcher."""
-
- def __init__(self, matcher):
- self.matcher = matcher
-
- def __str__(self):
- return 'Not(%s)' % (self.matcher,)
-
- def match(self, other):
- mismatch = self.matcher.match(other)
- if mismatch is None:
- return MatchedUnexpectedly(self.matcher, other)
- else:
- return None
-
-
-class MatchedUnexpectedly:
- """A thing matched when it wasn't supposed to."""
-
- def __init__(self, matcher, other):
- self.matcher = matcher
- self.other = other
-
- def describe(self):
- return "%r matches %s" % (self.other, self.matcher)
-
-
-class Annotate:
- """Annotates a matcher with a descriptive string.
-
- Mismatches are then described as '<mismatch>: <annotation>'.
- """
-
- def __init__(self, annotation, matcher):
- self.annotation = annotation
- self.matcher = matcher
-
- def __str__(self):
- return 'Annotate(%r, %s)' % (self.annotation, self.matcher)
-
- def match(self, other):
- mismatch = self.matcher.match(other)
- if mismatch is not None:
- return AnnotatedMismatch(self.annotation, mismatch)
-
-
-class AnnotatedMismatch:
- """A mismatch annotated with a descriptive string."""
-
- def __init__(self, annotation, mismatch):
- self.annotation = annotation
- self.mismatch = mismatch
-
- def describe(self):
- return '%s: %s' % (self.mismatch.describe(), self.annotation)
diff --git a/lib/subunit/python/testtools/run.py b/lib/subunit/python/testtools/run.py
deleted file mode 100755
index c4f461ecfb..0000000000
--- a/lib/subunit/python/testtools/run.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""python -m testtools.run testspec [testspec...]
-
-Run some tests with the testtools extended API.
-
-For instance, to run the testtools test suite.
- $ python -m testtools.run testtools.tests.test_suite
-"""
-
-import sys
-
-from testtools.tests import test_suite
-from testtools import TextTestResult
-
-
-class TestToolsTestRunner(object):
- """ A thunk object to support unittest.TestProgram."""
-
- def run(self, test):
- "Run the given test case or test suite."
- result = TextTestResult(sys.stdout)
- result.startTestRun()
- try:
- return test.run(result)
- finally:
- result.stopTestRun()
-
-
-if __name__ == '__main__':
- import optparse
- from unittest import TestProgram
- parser = optparse.OptionParser(__doc__)
- args = parser.parse_args()[1]
- if not args:
- parser.error("No testspecs given.")
- runner = TestToolsTestRunner()
- program = TestProgram(module=None, argv=[sys.argv[0]] + args,
- testRunner=runner)
diff --git a/lib/subunit/python/testtools/runtest.py b/lib/subunit/python/testtools/runtest.py
deleted file mode 100644
index 053e2205a7..0000000000
--- a/lib/subunit/python/testtools/runtest.py
+++ /dev/null
@@ -1,142 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Individual test case execution."""
-
-__metaclass__ = type
-__all__ = [
- 'RunTest',
- ]
-
-import sys
-
-from testtools.testresult import ExtendedToOriginalDecorator
-
-
-class RunTest:
- """An object to run a test.
-
- RunTest objects are used to implement the internal logic involved in
- running a test. TestCase.__init__ stores _RunTest as the class of RunTest
- to execute. Passing the runTest= parameter to TestCase.__init__ allows a
- different RunTest class to be used to execute the test.
-
- Subclassing or replacing RunTest can be useful to add functionality to the
- way that tests are run in a given project.
-
- :ivar case: The test case that is to be run.
- :ivar result: The result object a case is reporting to.
- :ivar handlers: A list of (ExceptionClass->handler code) for exceptions
- that should be caught if raised from the user code. Exceptions that
- are caught are checked against this list in first to last order.
- There is a catchall of Exception at the end of the list, so to add
- a new exception to the list, insert it at the front (which ensures that
- it will be checked before any existing base classes in the list. If you
- add multiple exceptions some of which are subclasses of each other, add
- the most specific exceptions last (so they come before their parent
- classes in the list).
- :ivar exception_caught: An object returned when _run_user catches an
- exception.
- """
-
- def __init__(self, case, handlers=None):
- """Create a RunTest to run a case.
-
- :param case: A testtools.TestCase test case object.
- :param handlers: Exception handlers for this RunTest. These are stored
- in self.handlers and can be modified later if needed.
- """
- self.case = case
- self.handlers = handlers or []
- self.exception_caught = object()
-
- def run(self, result=None):
- """Run self.case reporting activity to result.
-
- :param result: Optional testtools.TestResult to report activity to.
- :return: The result object the test was run against.
- """
- if result is None:
- actual_result = self.case.defaultTestResult()
- actual_result.startTestRun()
- else:
- actual_result = result
- try:
- return self._run_one(actual_result)
- finally:
- if result is None:
- actual_result.stopTestRun()
-
- def _run_one(self, result):
- """Run one test reporting to result.
-
- :param result: A testtools.TestResult to report activity to.
- This result object is decorated with an ExtendedToOriginalDecorator
- to ensure that the latest TestResult API can be used with
- confidence by client code.
- :return: The result object the test was run against.
- """
- return self._run_prepared_result(ExtendedToOriginalDecorator(result))
-
- def _run_prepared_result(self, result):
- """Run one test reporting to result.
-
- :param result: A testtools.TestResult to report activity to.
- :return: The result object the test was run against.
- """
- result.startTest(self.case)
- self.result = result
- try:
- self._run_core()
- finally:
- result.stopTest(self.case)
- return result
-
- def _run_core(self):
- """Run the user supplied test code."""
- if self.exception_caught == self._run_user(self.case._run_setup,
- self.result):
- # Don't run the test method if we failed getting here.
- self.case._runCleanups(self.result)
- return
- # Run everything from here on in. If any of the methods raise an
- # exception we'll have failed.
- failed = False
- try:
- if self.exception_caught == self._run_user(
- self.case._run_test_method, self.result):
- failed = True
- finally:
- try:
- if self.exception_caught == self._run_user(
- self.case._run_teardown, self.result):
- failed = True
- finally:
- try:
- if not self._run_user(
- self.case._runCleanups, self.result):
- failed = True
- finally:
- if not failed:
- self.result.addSuccess(self.case,
- details=self.case.getDetails())
-
- def _run_user(self, fn, *args):
- """Run a user supplied function.
-
- Exceptions are processed by self.handlers.
- """
- try:
- return fn(*args)
- except KeyboardInterrupt:
- raise
- except Exception:
- # Note that bare exceptions are not caught, so raised strings will
- # escape: but they are deprecated anyway.
- exc_info = sys.exc_info()
- e = exc_info[1]
- for exc_class, handler in self.handlers:
- self.case.onException(exc_info)
- if isinstance(e, exc_class):
- handler(self.case, self.result, e)
- return self.exception_caught
- raise e
diff --git a/lib/subunit/python/testtools/testcase.py b/lib/subunit/python/testtools/testcase.py
deleted file mode 100644
index fd70141e6d..0000000000
--- a/lib/subunit/python/testtools/testcase.py
+++ /dev/null
@@ -1,468 +0,0 @@
-# Copyright (c) 2008, 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Test case related stuff."""
-
-__metaclass__ = type
-__all__ = [
- 'clone_test_with_new_id',
- 'TestCase',
- 'skip',
- 'skipIf',
- 'skipUnless',
- ]
-
-import copy
-try:
- from functools import wraps
-except ImportError:
- wraps = None
-import itertools
-import sys
-import types
-import unittest
-
-from testtools import content
-from testtools.runtest import RunTest
-from testtools.testresult import TestResult
-from testtools.utils import advance_iterator
-
-
-try:
- # Try to use the python2.7 SkipTest exception for signalling skips.
- from unittest.case import SkipTest as TestSkipped
-except ImportError:
- class TestSkipped(Exception):
- """Raised within TestCase.run() when a test is skipped."""
-
-
-try:
- # Try to use the same exceptions python 2.7 does.
- from unittest.case import _ExpectedFailure, _UnexpectedSuccess
-except ImportError:
- # Oops, not available, make our own.
- class _UnexpectedSuccess(Exception):
- """An unexpected success was raised.
-
- Note that this exception is private plumbing in testtools' testcase
- module.
- """
-
- class _ExpectedFailure(Exception):
- """An expected failure occured.
-
- Note that this exception is private plumbing in testtools' testcase
- module.
- """
-
-
-class TestCase(unittest.TestCase):
- """Extensions to the basic TestCase.
-
- :ivar exception_handlers: Exceptions to catch from setUp, runTest and
- tearDown. This list is able to be modified at any time and consists of
- (exception_class, handler(case, result, exception_value)) pairs.
- """
-
- skipException = TestSkipped
-
- def __init__(self, *args, **kwargs):
- """Construct a TestCase.
-
- :param testMethod: The name of the method to run.
-        :param runTest: Optional class to use to execute the test. If not
-            supplied, testtools.runtest.RunTest is used. The instance to be
-            used is created when run() is invoked, so it is fresh each time.
- """
-        runTest = kwargs.pop('runTest', None)
-        unittest.TestCase.__init__(self, *args, **kwargs)
- self._cleanups = []
- self._unique_id_gen = itertools.count(1)
- self.__setup_called = False
- self.__teardown_called = False
- self.__details = {}
-        self.__RunTest = runTest or RunTest
- self.__exception_handlers = []
- self.exception_handlers = [
- (self.skipException, self._report_skip),
- (self.failureException, self._report_failure),
- (_ExpectedFailure, self._report_expected_failure),
- (_UnexpectedSuccess, self._report_unexpected_success),
- (Exception, self._report_error),
- ]
-
- def __eq__(self, other):
- eq = getattr(unittest.TestCase, '__eq__', None)
- if eq is not None and not unittest.TestCase.__eq__(self, other):
- return False
- return self.__dict__ == other.__dict__
-
- def __repr__(self):
- # We add id to the repr because it makes testing testtools easier.
- return "<%s id=0x%0x>" % (self.id(), id(self))
-
- def addDetail(self, name, content_object):
- """Add a detail to be reported with this test's outcome.
-
- For more details see pydoc testtools.TestResult.
-
- :param name: The name to give this detail.
- :param content_object: The content object for this detail. See
- testtools.content for more detail.
- """
- self.__details[name] = content_object
-
- def getDetails(self):
- """Get the details dict that will be reported with this test's outcome.
-
- For more details see pydoc testtools.TestResult.
- """
- return self.__details
-
- def shortDescription(self):
- return self.id()
-
- def skip(self, reason):
- """Cause this test to be skipped.
-
- This raises self.skipException(reason). skipException is raised
- to permit a skip to be triggered at any point (during setUp or the
- testMethod itself). The run() method catches skipException and
-        translates that into a call to the result object's addSkip method.
-
- :param reason: The reason why the test is being skipped. This must
- support being cast into a unicode string for reporting.
- """
- raise self.skipException(reason)
-
- def _formatTypes(self, classOrIterable):
- """Format a class or a bunch of classes for display in an error."""
- className = getattr(classOrIterable, '__name__', None)
- if className is None:
- className = ', '.join(klass.__name__ for klass in classOrIterable)
- return className
-
- def _runCleanups(self, result):
- """Run the cleanups that have been added with addCleanup.
-
- See the docstring for addCleanup for more information.
-
- Returns True if all cleanups ran without error, False otherwise.
- """
- ok = True
- while self._cleanups:
- function, arguments, keywordArguments = self._cleanups.pop()
- try:
- function(*arguments, **keywordArguments)
- except KeyboardInterrupt:
- raise
- except:
- self._report_error(self, result, None)
- ok = False
- return ok
-
- def addCleanup(self, function, *arguments, **keywordArguments):
- """Add a cleanup function to be called after tearDown.
-
-        Functions added with addCleanup will be called in reverse order of
-        adding, after the test method and tearDown have run.
-
- If a function added with addCleanup raises an exception, the error
- will be recorded as a test error, and the next cleanup will then be
- run.
-
- Cleanup functions are always called before a test finishes running,
- even if setUp is aborted by an exception.
- """
- self._cleanups.append((function, arguments, keywordArguments))
-
- def addOnException(self, handler):
- """Add a handler to be called when an exception occurs in test code.
-
- This handler cannot affect what result methods are called, and is
- called before any outcome is called on the result object. An example
- use for it is to add some diagnostic state to the test details dict
- which is expensive to calculate and not interesting for reporting in
- the success case.
-
- Handlers are called before the outcome (such as addFailure) that
- the exception has caused.
-
- Handlers are called in first-added, first-called order, and if they
-        raise an exception, that will propagate out of the test running
- machinery, halting test processing. As a result, do not call code that
- may unreasonably fail.
- """
- self.__exception_handlers.append(handler)
-
- def _add_reason(self, reason):
- self.addDetail('reason', content.Content(
- content.ContentType('text', 'plain'),
- lambda: [reason.encode('utf8')]))
-
- def assertIn(self, needle, haystack):
- """Assert that needle is in haystack."""
- self.assertTrue(
- needle in haystack, '%r not in %r' % (needle, haystack))
-
- def assertIs(self, expected, observed, message=''):
- """Assert that 'expected' is 'observed'.
-
- :param expected: The expected value.
- :param observed: The observed value.
- :param message: An optional message describing the error.
- """
- if message:
- message = ': ' + message
- self.assertTrue(
- expected is observed,
- '%r is not %r%s' % (expected, observed, message))
-
- def assertIsNot(self, expected, observed, message=''):
- """Assert that 'expected' is not 'observed'."""
- if message:
- message = ': ' + message
- self.assertTrue(
- expected is not observed,
- '%r is %r%s' % (expected, observed, message))
-
- def assertNotIn(self, needle, haystack):
- """Assert that needle is not in haystack."""
- self.assertTrue(
- needle not in haystack, '%r in %r' % (needle, haystack))
-
- def assertIsInstance(self, obj, klass):
- self.assertTrue(
- isinstance(obj, klass),
- '%r is not an instance of %s' % (obj, self._formatTypes(klass)))
-
- def assertRaises(self, excClass, callableObj, *args, **kwargs):
- """Fail unless an exception of class excClass is thrown
- by callableObj when invoked with arguments args and keyword
- arguments kwargs. If a different type of exception is
- thrown, it will not be caught, and the test case will be
- deemed to have suffered an error, exactly as for an
- unexpected exception.
- """
- try:
- ret = callableObj(*args, **kwargs)
- except excClass:
- return sys.exc_info()[1]
- else:
- excName = self._formatTypes(excClass)
- self.fail("%s not raised, %r returned instead." % (excName, ret))
- failUnlessRaises = assertRaises
-
- def assertThat(self, matchee, matcher):
- """Assert that matchee is matched by matcher.
-
- :param matchee: An object to match with matcher.
- :param matcher: An object meeting the testtools.Matcher protocol.
- :raises self.failureException: When matcher does not match thing.
- """
- mismatch = matcher.match(matchee)
- if not mismatch:
- return
- self.fail('Match failed. Matchee: "%s"\nMatcher: %s\nDifference: %s\n'
- % (matchee, matcher, mismatch.describe()))
-
- def defaultTestResult(self):
- return TestResult()
-
- def expectFailure(self, reason, predicate, *args, **kwargs):
- """Check that a test fails in a particular way.
-
-        If the test fails in the expected way, an _ExpectedFailure is raised.
-        If it succeeds, an _UnexpectedSuccess is raised.
-
- The expected use of expectFailure is as a barrier at the point in a
- test where the test would fail. For example:
-        >>> def test_foo(self):
-        ...     self.expectFailure("1 should be 0", self.assertNotEqual, 1, 0)
-        ...     self.assertEqual(1, 0)
-
- If in the future 1 were to equal 0, the expectFailure call can simply
- be removed. This separation preserves the original intent of the test
- while it is in the expectFailure mode.
- """
- self._add_reason(reason)
- try:
- predicate(*args, **kwargs)
- except self.failureException:
- exc_info = sys.exc_info()
- self.addDetail('traceback',
- content.TracebackContent(exc_info, self))
- raise _ExpectedFailure(exc_info)
- else:
- raise _UnexpectedSuccess(reason)
-
- def getUniqueInteger(self):
- """Get an integer unique to this test.
-
- Returns an integer that is guaranteed to be unique to this instance.
- Use this when you need an arbitrary integer in your test, or as a
- helper for custom anonymous factory methods.
- """
- return advance_iterator(self._unique_id_gen)
-
- def getUniqueString(self, prefix=None):
- """Get a string unique to this test.
-
- Returns a string that is guaranteed to be unique to this instance. Use
- this when you need an arbitrary string in your test, or as a helper
- for custom anonymous factory methods.
-
- :param prefix: The prefix of the string. If not provided, defaults
-            to the id of the test.
- :return: A bytestring of '<prefix>-<unique_int>'.
- """
- if prefix is None:
- prefix = self.id()
- return '%s-%d' % (prefix, self.getUniqueInteger())
-
- def onException(self, exc_info):
- """Called when an exception propogates from test code.
-
- :seealso addOnException:
- """
- for handler in self.__exception_handlers:
- handler(exc_info)
-
- @staticmethod
- def _report_error(self, result, err):
- self._report_traceback()
- result.addError(self, details=self.getDetails())
-
- @staticmethod
- def _report_expected_failure(self, result, err):
- result.addExpectedFailure(self, details=self.getDetails())
-
- @staticmethod
- def _report_failure(self, result, err):
- self._report_traceback()
- result.addFailure(self, details=self.getDetails())
-
- @staticmethod
- def _report_skip(self, result, err):
- if err.args:
- reason = err.args[0]
- else:
- reason = "no reason given."
- self._add_reason(reason)
- result.addSkip(self, details=self.getDetails())
-
- def _report_traceback(self):
- self.addDetail('traceback',
- content.TracebackContent(sys.exc_info(), self))
-
- @staticmethod
- def _report_unexpected_success(self, result, err):
- result.addUnexpectedSuccess(self, details=self.getDetails())
-
- def run(self, result=None):
- return self.__RunTest(self, self.exception_handlers).run(result)
-
- def _run_setup(self, result):
- """Run the setUp function for this test.
-
- :param result: A testtools.TestResult to report activity to.
- :raises ValueError: If the base class setUp is not called, a
- ValueError is raised.
- """
- self.setUp()
- if not self.__setup_called:
- raise ValueError(
- "TestCase.setUp was not called. Have you upcalled all the "
- "way up the hierarchy from your setUp? e.g. Call "
- "super(%s, self).setUp() from your setUp()."
- % self.__class__.__name__)
-
- def _run_teardown(self, result):
- """Run the tearDown function for this test.
-
- :param result: A testtools.TestResult to report activity to.
- :raises ValueError: If the base class tearDown is not called, a
- ValueError is raised.
- """
- self.tearDown()
- if not self.__teardown_called:
- raise ValueError(
- "TestCase.tearDown was not called. Have you upcalled all the "
- "way up the hierarchy from your tearDown? e.g. Call "
- "super(%s, self).tearDown() from your tearDown()."
- % self.__class__.__name__)
-
- def _run_test_method(self, result):
- """Run the test method for this test.
-
- :param result: A testtools.TestResult to report activity to.
- :return: None.
- """
- absent_attr = object()
- # Python 2.5+
- method_name = getattr(self, '_testMethodName', absent_attr)
- if method_name is absent_attr:
- # Python 2.4
- method_name = getattr(self, '_TestCase__testMethodName')
- testMethod = getattr(self, method_name)
- testMethod()
-
- def setUp(self):
- unittest.TestCase.setUp(self)
- self.__setup_called = True
-
- def tearDown(self):
- unittest.TestCase.tearDown(self)
- self.__teardown_called = True
-
-
-# Python 2.4 did not know how to copy functions.
-if types.FunctionType not in copy._copy_dispatch:
- copy._copy_dispatch[types.FunctionType] = copy._copy_immutable
-
-
-
-def clone_test_with_new_id(test, new_id):
- """Copy a TestCase, and give the copied test a new id.
-
- This is only expected to be used on tests that have been constructed but
- not executed.
- """
- newTest = copy.copy(test)
- newTest.id = lambda: new_id
- return newTest
-
-
-def skip(reason):
- """A decorator to skip unit tests.
-
- This is just syntactic sugar so users don't have to change any of their
- unit tests in order to migrate to python 2.7, which provides the
- @unittest.skip decorator.
- """
- def decorator(test_item):
- if wraps is not None:
- @wraps(test_item)
- def skip_wrapper(*args, **kwargs):
- raise TestCase.skipException(reason)
- else:
- def skip_wrapper(test_item):
- test_item.skip(reason)
- return skip_wrapper
- return decorator
-
-
-def skipIf(condition, reason):
- """Skip a test if the condition is true."""
- if condition:
- return skip(reason)
- def _id(obj):
- return obj
- return _id
-
-
-def skipUnless(condition, reason):
- """Skip a test unless the condition is true."""
- if not condition:
- return skip(reason)
- def _id(obj):
- return obj
- return _id
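The deleted module above extends unittest.TestCase with cleanups, attachable details and skip decorators. A short hypothetical usage sketch against that API (class and test names are illustrative only):

    import sys

    from testtools.content import Content
    from testtools.content_type import ContentType
    from testtools.testcase import TestCase, skipIf

    class ExtensionExamples(TestCase):

        def test_cleanups_run_in_reverse(self):
            log = []
            # Cleanups run LIFO after the test method and tearDown,
            # so 'first' ends up appended before 'second'.
            self.addCleanup(log.append, 'second')
            self.addCleanup(log.append, 'first')

        def test_attach_detail(self):
            self.addDetail('config', Content(
                ContentType('text', 'plain'), lambda: ['verbose=1\n']))
            self.assertIn('config', self.getDetails())

        @skipIf(sys.platform == 'win32', 'POSIX-only example')
        def test_posix_only(self):
            pass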
diff --git a/lib/subunit/python/testtools/testresult/__init__.py b/lib/subunit/python/testtools/testresult/__init__.py
deleted file mode 100644
index 2ee3d25293..0000000000
--- a/lib/subunit/python/testtools/testresult/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Test result objects."""
-
-__all__ = [
- 'ExtendedToOriginalDecorator',
- 'MultiTestResult',
- 'TestResult',
- 'TextTestResult',
- 'ThreadsafeForwardingResult',
- ]
-
-from real import (
- ExtendedToOriginalDecorator,
- MultiTestResult,
- TestResult,
- TextTestResult,
- ThreadsafeForwardingResult,
- )
diff --git a/lib/subunit/python/testtools/testresult/doubles.py b/lib/subunit/python/testtools/testresult/doubles.py
deleted file mode 100644
index d231c919c2..0000000000
--- a/lib/subunit/python/testtools/testresult/doubles.py
+++ /dev/null
@@ -1,95 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Doubles of test result objects, useful for testing unittest code."""
-
-__all__ = [
- 'Python26TestResult',
- 'Python27TestResult',
- 'ExtendedTestResult',
- ]
-
-
-class LoggingBase(object):
- """Basic support for logging of results."""
-
- def __init__(self):
- self._events = []
- self.shouldStop = False
-
-
-class Python26TestResult(LoggingBase):
- """A precisely python 2.6 like test result, that logs."""
-
- def addError(self, test, err):
- self._events.append(('addError', test, err))
-
- def addFailure(self, test, err):
- self._events.append(('addFailure', test, err))
-
- def addSuccess(self, test):
- self._events.append(('addSuccess', test))
-
- def startTest(self, test):
- self._events.append(('startTest', test))
-
- def stop(self):
- self.shouldStop = True
-
- def stopTest(self, test):
- self._events.append(('stopTest', test))
-
-
-class Python27TestResult(Python26TestResult):
- """A precisely python 2.7 like test result, that logs."""
-
- def addExpectedFailure(self, test, err):
- self._events.append(('addExpectedFailure', test, err))
-
- def addSkip(self, test, reason):
- self._events.append(('addSkip', test, reason))
-
- def addUnexpectedSuccess(self, test):
- self._events.append(('addUnexpectedSuccess', test))
-
- def startTestRun(self):
- self._events.append(('startTestRun',))
-
- def stopTestRun(self):
- self._events.append(('stopTestRun',))
-
-
-class ExtendedTestResult(Python27TestResult):
- """A test result like the proposed extended unittest result API."""
-
- def addError(self, test, err=None, details=None):
- self._events.append(('addError', test, err or details))
-
- def addFailure(self, test, err=None, details=None):
- self._events.append(('addFailure', test, err or details))
-
- def addExpectedFailure(self, test, err=None, details=None):
- self._events.append(('addExpectedFailure', test, err or details))
-
- def addSkip(self, test, reason=None, details=None):
- self._events.append(('addSkip', test, reason or details))
-
- def addSuccess(self, test, details=None):
- if details:
- self._events.append(('addSuccess', test, details))
- else:
- self._events.append(('addSuccess', test))
-
- def addUnexpectedSuccess(self, test, details=None):
- if details is not None:
- self._events.append(('addUnexpectedSuccess', test, details))
- else:
- self._events.append(('addUnexpectedSuccess', test))
-
- def progress(self, offset, whence):
- self._events.append(('progress', offset, whence))
-
- def tags(self, new_tags, gone_tags):
- self._events.append(('tags', new_tags, gone_tags))
-
- def time(self, time):
- self._events.append(('time', time))
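These doubles exist so that code driving result objects can be tested by asserting on exact event sequences. A small sketch of the intended use:

    from testtools.testresult.doubles import ExtendedTestResult

    result = ExtendedTestResult()
    result.startTestRun()
    result.tags(set(['slow']), set())
    result.stopTestRun()
    assert result._events == [
        ('startTestRun',),
        ('tags', set(['slow']), set()),
        ('stopTestRun',),
        ]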
diff --git a/lib/subunit/python/testtools/testresult/real.py b/lib/subunit/python/testtools/testresult/real.py
deleted file mode 100644
index 8c8a3edd6e..0000000000
--- a/lib/subunit/python/testtools/testresult/real.py
+++ /dev/null
@@ -1,540 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-"""Test results and related things."""
-
-__metaclass__ = type
-__all__ = [
- 'ExtendedToOriginalDecorator',
- 'MultiTestResult',
- 'TestResult',
- 'ThreadsafeForwardingResult',
- ]
-
-import datetime
-import unittest
-
-
-class TestResult(unittest.TestResult):
- """Subclass of unittest.TestResult extending the protocol for flexability.
-
- This test result supports an experimental protocol for providing additional
-    data with test outcomes. All the outcome methods take an optional dict
- 'details'. If supplied any other detail parameters like 'err' or 'reason'
- should not be provided. The details dict is a mapping from names to
- MIME content objects (see testtools.content). This permits attaching
- tracebacks, log files, or even large objects like databases that were
- part of the test fixture. Until this API is accepted into upstream
- Python it is considered experimental: it may be replaced at any point
- by a newer version more in line with upstream Python. Compatibility would
- be aimed for in this case, but may not be possible.
-
- :ivar skip_reasons: A dict of skip-reasons -> list of tests. See addSkip.
- """
-
- def __init__(self):
- super(TestResult, self).__init__()
- self.skip_reasons = {}
- self.__now = None
- # -- Start: As per python 2.7 --
- self.expectedFailures = []
- self.unexpectedSuccesses = []
- # -- End: As per python 2.7 --
-
- def addExpectedFailure(self, test, err=None, details=None):
- """Called when a test has failed in an expected manner.
-
-        Like with addSuccess and addError, stopTest should still be called.
-
-        :param test: The test that failed in an expected manner.
- :param err: The exc_info of the error that was raised.
- :return: None
- """
- # This is the python 2.7 implementation
- self.expectedFailures.append(
- (test, self._err_details_to_string(test, err, details)))
-
- def addError(self, test, err=None, details=None):
- """Called when an error has occurred. 'err' is a tuple of values as
- returned by sys.exc_info().
-
- :param details: Alternative way to supply details about the outcome.
- see the class docstring for more information.
- """
- self.errors.append((test,
- self._err_details_to_string(test, err, details)))
-
- def addFailure(self, test, err=None, details=None):
- """Called when an error has occurred. 'err' is a tuple of values as
- returned by sys.exc_info().
-
- :param details: Alternative way to supply details about the outcome.
- see the class docstring for more information.
- """
- self.failures.append((test,
- self._err_details_to_string(test, err, details)))
-
- def addSkip(self, test, reason=None, details=None):
- """Called when a test has been skipped rather than running.
-
-        Like with addSuccess and addError, stopTest should still be called.
-
- This must be called by the TestCase. 'addError' and 'addFailure' will
- not call addSkip, since they have no assumptions about the kind of
- errors that a test can raise.
-
- :param test: The test that has been skipped.
- :param reason: The reason for the test being skipped. For instance,
- u"pyGL is not available".
- :param details: Alternative way to supply details about the outcome.
- see the class docstring for more information.
- :return: None
- """
- if reason is None:
- reason = details.get('reason')
- if reason is None:
- reason = 'No reason given'
- else:
- reason = ''.join(reason.iter_text())
- skip_list = self.skip_reasons.setdefault(reason, [])
- skip_list.append(test)
-
- def addSuccess(self, test, details=None):
- """Called when a test succeeded."""
-
- def addUnexpectedSuccess(self, test, details=None):
- """Called when a test was expected to fail, but succeed."""
- self.unexpectedSuccesses.append(test)
-
- def _err_details_to_string(self, test, err=None, details=None):
- """Convert an error in exc_info form or a contents dict to a string."""
- if err is not None:
- return self._exc_info_to_string(err, test)
- return _details_to_str(details)
-
- def _now(self):
- """Return the current 'test time'.
-
- If the time() method has not been called, this is equivalent to
-        datetime.now(); otherwise it's the last datetime supplied to the
- time() method.
- """
- if self.__now is None:
- return datetime.datetime.now()
- else:
- return self.__now
-
- def startTestRun(self):
- """Called before a test run starts.
-
- New in python 2.7
- """
-
- def stopTestRun(self):
- """Called after a test run completes
-
- New in python 2.7
- """
-
- def time(self, a_datetime):
- """Provide a timestamp to represent the current time.
-
- This is useful when test activity is time delayed, or happening
- concurrently and getting the system time between API calls will not
- accurately represent the duration of tests (or the whole run).
-
- Calling time() sets the datetime used by the TestResult object.
- Time is permitted to go backwards when using this call.
-
- :param a_datetime: A datetime.datetime object with TZ information or
- None to reset the TestResult to gathering time from the system.
- """
- self.__now = a_datetime
-
- def done(self):
- """Called when the test runner is done.
-
-        Deprecated in favour of stopTestRun.
- """
-
-
-class MultiTestResult(TestResult):
- """A test result that dispatches to many test results."""
-
- def __init__(self, *results):
- TestResult.__init__(self)
- self._results = map(ExtendedToOriginalDecorator, results)
-
- def _dispatch(self, message, *args, **kwargs):
- for result in self._results:
- getattr(result, message)(*args, **kwargs)
-
- def startTest(self, test):
- self._dispatch('startTest', test)
-
- def stopTest(self, test):
- self._dispatch('stopTest', test)
-
- def addError(self, test, error=None, details=None):
- self._dispatch('addError', test, error, details=details)
-
- def addExpectedFailure(self, test, err=None, details=None):
- self._dispatch('addExpectedFailure', test, err, details=details)
-
- def addFailure(self, test, err=None, details=None):
- self._dispatch('addFailure', test, err, details=details)
-
- def addSkip(self, test, reason=None, details=None):
- self._dispatch('addSkip', test, reason, details=details)
-
- def addSuccess(self, test, details=None):
- self._dispatch('addSuccess', test, details=details)
-
- def addUnexpectedSuccess(self, test, details=None):
- self._dispatch('addUnexpectedSuccess', test, details=details)
-
- def startTestRun(self):
- self._dispatch('startTestRun')
-
- def stopTestRun(self):
- self._dispatch('stopTestRun')
-
- def done(self):
- self._dispatch('done')
-
-
-class TextTestResult(TestResult):
- """A TestResult which outputs activity to a text stream."""
-
- def __init__(self, stream):
- """Construct a TextTestResult writing to stream."""
- super(TextTestResult, self).__init__()
- self.stream = stream
- self.sep1 = '=' * 70 + '\n'
- self.sep2 = '-' * 70 + '\n'
-
- def _delta_to_float(self, a_timedelta):
- return (a_timedelta.days * 86400.0 + a_timedelta.seconds +
- a_timedelta.microseconds / 1000000.0)
-
- def _show_list(self, label, error_list):
- for test, output in error_list:
- self.stream.write(self.sep1)
- self.stream.write("%s: %s\n" % (label, test.id()))
- self.stream.write(self.sep2)
- self.stream.write(output)
-
- def startTestRun(self):
- super(TextTestResult, self).startTestRun()
- self.__start = self._now()
- self.stream.write("Tests running...\n")
-
- def stopTestRun(self):
- if self.testsRun != 1:
- plural = 's'
- else:
- plural = ''
- stop = self._now()
- self._show_list('ERROR', self.errors)
- self._show_list('FAIL', self.failures)
- self.stream.write("Ran %d test%s in %.3fs\n\n" %
- (self.testsRun, plural,
- self._delta_to_float(stop - self.__start)))
- if self.wasSuccessful():
- self.stream.write("OK\n")
- else:
- self.stream.write("FAILED (")
- details = []
- details.append("failures=%d" % (
- len(self.failures) + len(self.errors)))
- self.stream.write(", ".join(details))
- self.stream.write(")\n")
- super(TextTestResult, self).stopTestRun()
-
-
-class ThreadsafeForwardingResult(TestResult):
- """A TestResult which ensures the target does not receive mixed up calls.
-
- This is used when receiving test results from multiple sources, and batches
- up all the activity for a single test into a thread-safe batch where all
- other ThreadsafeForwardingResult objects sharing the same semaphore will be
- locked out.
-
- Typical use of ThreadsafeForwardingResult involves creating one
- ThreadsafeForwardingResult per thread in a ConcurrentTestSuite. These
- forward to the TestResult that the ConcurrentTestSuite run method was
- called with.
-
- target.done() is called once for each ThreadsafeForwardingResult that
- forwards to the same target. If the target's done() takes special action,
- care should be taken to accommodate this.
- """
-
- def __init__(self, target, semaphore):
- """Create a ThreadsafeForwardingResult forwarding to target.
-
- :param target: A TestResult.
- :param semaphore: A threading.Semaphore with limit 1.
- """
- TestResult.__init__(self)
- self.result = ExtendedToOriginalDecorator(target)
- self.semaphore = semaphore
-
- def addError(self, test, err=None, details=None):
- self.semaphore.acquire()
- try:
- self.result.startTest(test)
- self.result.addError(test, err, details=details)
- self.result.stopTest(test)
- finally:
- self.semaphore.release()
-
- def addExpectedFailure(self, test, err=None, details=None):
- self.semaphore.acquire()
- try:
- self.result.startTest(test)
- self.result.addExpectedFailure(test, err, details=details)
- self.result.stopTest(test)
- finally:
- self.semaphore.release()
-
- def addFailure(self, test, err=None, details=None):
- self.semaphore.acquire()
- try:
- self.result.startTest(test)
- self.result.addFailure(test, err, details=details)
- self.result.stopTest(test)
- finally:
- self.semaphore.release()
-
- def addSkip(self, test, reason=None, details=None):
- self.semaphore.acquire()
- try:
- self.result.startTest(test)
- self.result.addSkip(test, reason, details=details)
- self.result.stopTest(test)
- finally:
- self.semaphore.release()
-
- def addSuccess(self, test, details=None):
- self.semaphore.acquire()
- try:
- self.result.startTest(test)
- self.result.addSuccess(test, details=details)
- self.result.stopTest(test)
- finally:
- self.semaphore.release()
-
- def addUnexpectedSuccess(self, test, details=None):
- self.semaphore.acquire()
- try:
- self.result.startTest(test)
- self.result.addUnexpectedSuccess(test, details=details)
- self.result.stopTest(test)
- finally:
- self.semaphore.release()
-
- def startTestRun(self):
- self.semaphore.acquire()
- try:
- self.result.startTestRun()
- finally:
- self.semaphore.release()
-
- def stopTestRun(self):
- self.semaphore.acquire()
- try:
- self.result.stopTestRun()
- finally:
- self.semaphore.release()
-
- def done(self):
- self.semaphore.acquire()
- try:
- self.result.done()
- finally:
- self.semaphore.release()
-
-
-class ExtendedToOriginalDecorator(object):
- """Permit new TestResult API code to degrade gracefully with old results.
-
- This decorates an existing TestResult and converts missing outcomes
- such as addSkip to older outcomes such as addSuccess. It also supports
- the extended details protocol. In all cases the most recent protocol
- is attempted first, and fallbacks only occur when the decorated result
- does not support the newer style of calling.
- """
-
- def __init__(self, decorated):
- self.decorated = decorated
-
- def __getattr__(self, name):
- return getattr(self.decorated, name)
-
- def addError(self, test, err=None, details=None):
- self._check_args(err, details)
- if details is not None:
- try:
- return self.decorated.addError(test, details=details)
- except TypeError:
- # have to convert
- err = self._details_to_exc_info(details)
- return self.decorated.addError(test, err)
-
- def addExpectedFailure(self, test, err=None, details=None):
- self._check_args(err, details)
- addExpectedFailure = getattr(
- self.decorated, 'addExpectedFailure', None)
- if addExpectedFailure is None:
- return self.addSuccess(test)
- if details is not None:
- try:
- return addExpectedFailure(test, details=details)
- except TypeError:
- # have to convert
- err = self._details_to_exc_info(details)
- return addExpectedFailure(test, err)
-
- def addFailure(self, test, err=None, details=None):
- self._check_args(err, details)
- if details is not None:
- try:
- return self.decorated.addFailure(test, details=details)
- except TypeError:
- # have to convert
- err = self._details_to_exc_info(details)
- return self.decorated.addFailure(test, err)
-
- def addSkip(self, test, reason=None, details=None):
- self._check_args(reason, details)
- addSkip = getattr(self.decorated, 'addSkip', None)
- if addSkip is None:
- return self.decorated.addSuccess(test)
- if details is not None:
- try:
- return addSkip(test, details=details)
- except TypeError:
- # have to convert
- reason = _details_to_str(details)
- return addSkip(test, reason)
-
- def addUnexpectedSuccess(self, test, details=None):
- outcome = getattr(self.decorated, 'addUnexpectedSuccess', None)
- if outcome is None:
- return self.decorated.addSuccess(test)
- if details is not None:
- try:
- return outcome(test, details=details)
- except TypeError:
- pass
- return outcome(test)
-
- def addSuccess(self, test, details=None):
- if details is not None:
- try:
- return self.decorated.addSuccess(test, details=details)
- except TypeError:
- pass
- return self.decorated.addSuccess(test)
-
- def _check_args(self, err, details):
- param_count = 0
- if err is not None:
- param_count += 1
- if details is not None:
- param_count += 1
- if param_count != 1:
- raise ValueError("Must pass only one of err '%s' and details '%s"
- % (err, details))
-
- def _details_to_exc_info(self, details):
- """Convert a details dict to an exc_info tuple."""
- return (_StringException,
- _StringException(_details_to_str(details)), None)
-
- def done(self):
- try:
- return self.decorated.done()
- except AttributeError:
- return
-
- def progress(self, offset, whence):
- method = getattr(self.decorated, 'progress', None)
- if method is None:
- return
- return method(offset, whence)
-
- @property
- def shouldStop(self):
- return self.decorated.shouldStop
-
- def startTest(self, test):
- return self.decorated.startTest(test)
-
- def startTestRun(self):
- try:
- return self.decorated.startTestRun()
- except AttributeError:
- return
-
- def stop(self):
- return self.decorated.stop()
-
- def stopTest(self, test):
- return self.decorated.stopTest(test)
-
- def stopTestRun(self):
- try:
- return self.decorated.stopTestRun()
- except AttributeError:
- return
-
- def tags(self, new_tags, gone_tags):
- method = getattr(self.decorated, 'tags', None)
- if method is None:
- return
- return method(new_tags, gone_tags)
-
- def time(self, a_datetime):
- method = getattr(self.decorated, 'time', None)
- if method is None:
- return
- return method(a_datetime)
-
- def wasSuccessful(self):
- return self.decorated.wasSuccessful()
-
-
-class _StringException(Exception):
- """An exception made from an arbitrary string."""
-
- def __hash__(self):
- return id(self)
-
- def __str__(self):
- """Stringify better than 2.x's default behaviour of ascii encoding."""
- return self.args[0]
-
- def __eq__(self, other):
- try:
- return self.args == other.args
- except AttributeError:
- return False
-
-
-def _details_to_str(details):
- """Convert a details dict to a string."""
- chars = []
- # sorted is for testing, may want to remove that and use a dict
- # subclass with defined order for items instead.
- for key, content in sorted(details.items()):
- if content.content_type.type != 'text':
- chars.append('Binary content: %s\n' % key)
- continue
- chars.append('Text attachment: %s\n' % key)
- chars.append('------------\n')
- chars.extend(content.iter_text())
- if not chars[-1].endswith('\n'):
- chars.append('\n')
- chars.append('------------\n')
- return ''.join(chars)
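ExtendedToOriginalDecorator and the helpers above let details-style outcomes degrade onto results that only understand exc_info tuples. A sketch of that fallback path (Sample and the 'log' detail are made up for illustration):

    import unittest

    from testtools.content import Content
    from testtools.content_type import ContentType
    from testtools.testresult.real import ExtendedToOriginalDecorator

    class Sample(unittest.TestCase):
        def test_nothing(self):
            pass

    old_style = unittest.TestResult()   # has no details-aware methods
    result = ExtendedToOriginalDecorator(old_style)
    test = Sample('test_nothing')
    details = {'log': Content(ContentType('text', 'plain'),
                              lambda: ['connection refused\n'])}

    result.startTest(test)
    # Passing details= raises TypeError inside the decorator, which then
    # synthesizes a _StringException exc_info tuple for the old API.
    result.addError(test, details=details)
    result.stopTest(test)
    assert len(old_style.errors) == 1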
diff --git a/lib/subunit/python/testtools/tests/__init__.py b/lib/subunit/python/testtools/tests/__init__.py
deleted file mode 100644
index 2cceba91e2..0000000000
--- a/lib/subunit/python/testtools/tests/__init__.py
+++ /dev/null
@@ -1,30 +0,0 @@
-"""Tests for testtools itself."""
-
-# See README for copyright and licensing details.
-
-import unittest
-from testtools.tests import (
- test_content,
- test_content_type,
- test_matchers,
- test_runtest,
- test_testtools,
- test_testresult,
- test_testsuite,
- )
-
-
-def test_suite():
- suites = []
- modules = [
- test_content,
- test_content_type,
- test_matchers,
- test_runtest,
- test_testresult,
- test_testsuite,
- test_testtools,
- ]
- for module in modules:
- suites.append(getattr(module, 'test_suite')())
- return unittest.TestSuite(suites)
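The test_suite() function above follows the conventional aggregation protocol, so the whole tree can be run with the stock unittest runner; a one-line sketch:

    import unittest

    from testtools import tests

    unittest.TextTestRunner(verbosity=2).run(tests.test_suite())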
diff --git a/lib/subunit/python/testtools/tests/helpers.py b/lib/subunit/python/testtools/tests/helpers.py
deleted file mode 100644
index c4cf10c736..0000000000
--- a/lib/subunit/python/testtools/tests/helpers.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-"""Helpers for tests."""
-
-import sys
-
-__metaclass__ = type
-__all__ = [
- 'LoggingResult',
- ]
-
-from testtools import TestResult
-
-
-try:
- raise Exception
-except Exception:
- an_exc_info = sys.exc_info()
-
-# Deprecated: this class's attributes are somewhat non-deterministic, which
-# leads to hard-to-predict tests (because Python upstream keeps changing
-# things).
-class LoggingResult(TestResult):
- """TestResult that logs its event to a list."""
-
- def __init__(self, log):
- self._events = log
- super(LoggingResult, self).__init__()
-
- def startTest(self, test):
- self._events.append(('startTest', test))
- super(LoggingResult, self).startTest(test)
-
- def stopTest(self, test):
- self._events.append(('stopTest', test))
- super(LoggingResult, self).stopTest(test)
-
- def addFailure(self, test, error):
- self._events.append(('addFailure', test, error))
- super(LoggingResult, self).addFailure(test, error)
-
- def addError(self, test, error):
- self._events.append(('addError', test, error))
- super(LoggingResult, self).addError(test, error)
-
- def addSkip(self, test, reason):
- self._events.append(('addSkip', test, reason))
- super(LoggingResult, self).addSkip(test, reason)
-
- def addSuccess(self, test):
- self._events.append(('addSuccess', test))
- super(LoggingResult, self).addSuccess(test)
-
- def startTestRun(self):
- self._events.append('startTestRun')
- super(LoggingResult, self).startTestRun()
-
- def stopTestRun(self):
- self._events.append('stopTestRun')
- super(LoggingResult, self).stopTestRun()
-
- def done(self):
- self._events.append('done')
- super(LoggingResult, self).done()
-
-# Note: the following three classes differ from LoggingResult in that they
-# are fully defined exact matches rather than supersets.
-from testtools.testresult.doubles import *
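LoggingResult is handed a caller-owned list, so a test can drive a case through it and then assert on event ordering. A brief sketch (Noop is a made-up case):

    from testtools import TestCase
    from testtools.tests.helpers import LoggingResult

    class Noop(TestCase):
        def test_noop(self):
            pass

    log = []
    Noop('test_noop').run(LoggingResult(log))
    assert [event[0] for event in log] == \
        ['startTest', 'addSuccess', 'stopTest']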
diff --git a/lib/subunit/python/testtools/tests/test_content.py b/lib/subunit/python/testtools/tests/test_content.py
deleted file mode 100644
index 1159362036..0000000000
--- a/lib/subunit/python/testtools/tests/test_content.py
+++ /dev/null
@@ -1,72 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-import unittest
-from testtools.content import Content, TracebackContent
-from testtools.content_type import ContentType
-from testtools.utils import _u
-from testtools.tests.helpers import an_exc_info
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
-
-
-class TestContent(unittest.TestCase):
-
- def test___init___None_errors(self):
- self.assertRaises(ValueError, Content, None, None)
- self.assertRaises(ValueError, Content, None, lambda: ["traceback"])
- self.assertRaises(ValueError, Content,
- ContentType("text", "traceback"), None)
-
- def test___init___sets_ivars(self):
- content_type = ContentType("foo", "bar")
- content = Content(content_type, lambda: ["bytes"])
- self.assertEqual(content_type, content.content_type)
- self.assertEqual(["bytes"], list(content.iter_bytes()))
-
- def test___eq__(self):
- content_type = ContentType("foo", "bar")
- content1 = Content(content_type, lambda: ["bytes"])
- content2 = Content(content_type, lambda: ["bytes"])
- content3 = Content(content_type, lambda: ["by", "tes"])
- content4 = Content(content_type, lambda: ["by", "te"])
- content5 = Content(ContentType("f", "b"), lambda: ["by", "tes"])
- self.assertEqual(content1, content2)
- self.assertEqual(content1, content3)
- self.assertNotEqual(content1, content4)
- self.assertNotEqual(content1, content5)
-
- def test_iter_text_not_text_errors(self):
- content_type = ContentType("foo", "bar")
- content = Content(content_type, lambda: ["bytes"])
- self.assertRaises(ValueError, content.iter_text)
-
- def test_iter_text_decodes(self):
- content_type = ContentType("text", "strange", {"charset": "utf8"})
- content = Content(
- content_type, lambda: [_u("bytes\xea").encode("utf8")])
- self.assertEqual([_u("bytes\xea")], list(content.iter_text()))
-
- def test_iter_text_default_charset_iso_8859_1(self):
- content_type = ContentType("text", "strange")
- text = _u("bytes\xea")
- iso_version = text.encode("ISO-8859-1")
- content = Content(content_type, lambda: [iso_version])
- self.assertEqual([text], list(content.iter_text()))
-
-
-class TestTracebackContent(unittest.TestCase):
-
- def test___init___None_errors(self):
- self.assertRaises(ValueError, TracebackContent, None, None)
-
- def test___init___sets_ivars(self):
- content = TracebackContent(an_exc_info, self)
- content_type = ContentType("text", "x-traceback",
- {"language": "python", "charset": "utf8"})
- self.assertEqual(content_type, content.content_type)
- result = unittest.TestResult()
- expected = result._exc_info_to_string(an_exc_info, self)
- self.assertEqual(expected, ''.join(list(content.iter_text())))
diff --git a/lib/subunit/python/testtools/tests/test_content_type.py b/lib/subunit/python/testtools/tests/test_content_type.py
deleted file mode 100644
index dbefc21dec..0000000000
--- a/lib/subunit/python/testtools/tests/test_content_type.py
+++ /dev/null
@@ -1,34 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-import unittest
-from testtools.content_type import ContentType
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
-
-
-class TestContentType(unittest.TestCase):
-
- def test___init___None_errors(self):
- self.assertRaises(ValueError, ContentType, None, None)
- self.assertRaises(ValueError, ContentType, None, "traceback")
- self.assertRaises(ValueError, ContentType, "text", None)
-
- def test___init___sets_ivars(self):
- content_type = ContentType("foo", "bar")
- self.assertEqual("foo", content_type.type)
- self.assertEqual("bar", content_type.subtype)
- self.assertEqual({}, content_type.parameters)
-
- def test___init___with_parameters(self):
- content_type = ContentType("foo", "bar", {"quux":"thing"})
- self.assertEqual({"quux":"thing"}, content_type.parameters)
-
- def test___eq__(self):
- content_type1 = ContentType("foo", "bar", {"quux":"thing"})
- content_type2 = ContentType("foo", "bar", {"quux":"thing"})
- content_type3 = ContentType("foo", "bar", {"quux":"thing2"})
- self.assertTrue(content_type1.__eq__(content_type2))
- self.assertFalse(content_type1.__eq__(content_type3))
diff --git a/lib/subunit/python/testtools/tests/test_matchers.py b/lib/subunit/python/testtools/tests/test_matchers.py
deleted file mode 100644
index 74b1ebc56a..0000000000
--- a/lib/subunit/python/testtools/tests/test_matchers.py
+++ /dev/null
@@ -1,171 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-"""Tests for matchers."""
-
-import doctest
-
-from testtools import (
- Matcher, # check that Matcher is exposed at the top level for docs.
- TestCase,
- )
-from testtools.matchers import (
- Annotate,
- Equals,
- DocTestMatches,
- MatchesAny,
- MatchesAll,
- Not,
- NotEquals,
- )
-
-
-class TestMatchersInterface:
-
- def test_matches_match(self):
- matcher = self.matches_matcher
- matches = self.matches_matches
- mismatches = self.matches_mismatches
- for candidate in matches:
- self.assertEqual(None, matcher.match(candidate))
- for candidate in mismatches:
- mismatch = matcher.match(candidate)
- self.assertNotEqual(None, mismatch)
- self.assertNotEqual(None, getattr(mismatch, 'describe', None))
-
- def test__str__(self):
- # [(expected, object to __str__)].
- examples = self.str_examples
- for expected, matcher in examples:
- self.assertThat(matcher, DocTestMatches(expected))
-
- def test_describe_difference(self):
- # [(expected, matchee, matcher), ...]
- examples = self.describe_examples
- for difference, matchee, matcher in examples:
- mismatch = matcher.match(matchee)
- self.assertEqual(difference, mismatch.describe())
-
-
-class TestDocTestMatchesInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = DocTestMatches("Ran 1 test in ...s", doctest.ELLIPSIS)
- matches_matches = ["Ran 1 test in 0.000s", "Ran 1 test in 1.234s"]
- matches_mismatches = ["Ran 1 tests in 0.000s", "Ran 2 test in 0.000s"]
-
- str_examples = [("DocTestMatches('Ran 1 test in ...s\\n')",
- DocTestMatches("Ran 1 test in ...s")),
- ("DocTestMatches('foo\\n', flags=8)", DocTestMatches("foo", flags=8)),
- ]
-
- describe_examples = [('Expected:\n Ran 1 tests in ...s\nGot:\n'
- ' Ran 1 test in 0.123s\n', "Ran 1 test in 0.123s",
- DocTestMatches("Ran 1 tests in ...s", doctest.ELLIPSIS))]
-
-
-class TestDocTestMatchesSpecific(TestCase):
-
- def test___init__simple(self):
- matcher = DocTestMatches("foo")
- self.assertEqual("foo\n", matcher.want)
-
- def test___init__flags(self):
- matcher = DocTestMatches("bar\n", doctest.ELLIPSIS)
- self.assertEqual("bar\n", matcher.want)
- self.assertEqual(doctest.ELLIPSIS, matcher.flags)
-
-
-class TestEqualsInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = Equals(1)
- matches_matches = [1]
- matches_mismatches = [2]
-
- str_examples = [("Equals(1)", Equals(1)), ("Equals('1')", Equals('1'))]
-
- describe_examples = [("1 != 2", 2, Equals(1))]
-
-
-class TestNotEqualsInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = NotEquals(1)
- matches_matches = [2]
- matches_mismatches = [1]
-
- str_examples = [
- ("NotEquals(1)", NotEquals(1)), ("NotEquals('1')", NotEquals('1'))]
-
- describe_examples = [("1 == 1", 1, NotEquals(1))]
-
-
-class TestNotInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = Not(Equals(1))
- matches_matches = [2]
- matches_mismatches = [1]
-
- str_examples = [
- ("Not(Equals(1))", Not(Equals(1))),
- ("Not(Equals('1'))", Not(Equals('1')))]
-
- describe_examples = [('1 matches Equals(1)', 1, Not(Equals(1)))]
-
-
-class TestMatchersAnyInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesAny(DocTestMatches("1"), DocTestMatches("2"))
- matches_matches = ["1", "2"]
- matches_mismatches = ["3"]
-
- str_examples = [(
- "MatchesAny(DocTestMatches('1\\n'), DocTestMatches('2\\n'))",
- MatchesAny(DocTestMatches("1"), DocTestMatches("2"))),
- ]
-
- describe_examples = [("""Differences: [
-Expected:
- 1
-Got:
- 3
-
-Expected:
- 2
-Got:
- 3
-
-]
-""",
- "3", MatchesAny(DocTestMatches("1"), DocTestMatches("2")))]
-
-
-class TestMatchesAllInterface(TestCase, TestMatchersInterface):
-
- matches_matcher = MatchesAll(NotEquals(1), NotEquals(2))
- matches_matches = [3, 4]
- matches_mismatches = [1, 2]
-
- str_examples = [
- ("MatchesAll(NotEquals(1), NotEquals(2))",
- MatchesAll(NotEquals(1), NotEquals(2)))]
-
- describe_examples = [("""Differences: [
-1 == 1
-]
-""",
- 1, MatchesAll(NotEquals(1), NotEquals(2)))]
-
-
-class TestAnnotate(TestCase, TestMatchersInterface):
-
- matches_matcher = Annotate("foo", Equals(1))
- matches_matches = [1]
- matches_mismatches = [2]
-
- str_examples = [
- ("Annotate('foo', Equals(1))", Annotate("foo", Equals(1)))]
-
- describe_examples = [("1 != 2: foo", 2, Annotate('foo', Equals(1)))]
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
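The interface tests above pin down the Matcher protocol: match() returns None on success, or a mismatch object whose describe() explains the difference. A minimal hypothetical matcher written to that contract:

    from testtools import TestCase

    class IsEvenMismatch(object):
        """Describes why a value failed to match IsEven."""

        def __init__(self, actual):
            self.actual = actual

        def describe(self):
            return '%r is not even' % (self.actual,)

    class IsEven(object):
        """Hypothetical matcher for even integers."""

        def __str__(self):
            return 'IsEven()'

        def match(self, actual):
            if actual % 2 == 0:
                return None          # success: no mismatch
            return IsEvenMismatch(actual)

    class TestIsEven(TestCase):

        def test_even_matches(self):
            self.assertThat(4, IsEven())

        def test_odd_reports(self):
            self.assertEqual('3 is not even', IsEven().match(3).describe())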
diff --git a/lib/subunit/python/testtools/tests/test_runtest.py b/lib/subunit/python/testtools/tests/test_runtest.py
deleted file mode 100644
index 5c46ad1784..0000000000
--- a/lib/subunit/python/testtools/tests/test_runtest.py
+++ /dev/null
@@ -1,185 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Tests for the RunTest single test execution logic."""
-
-from testtools import (
- ExtendedToOriginalDecorator,
- RunTest,
- TestCase,
- TestResult,
- )
-from testtools.tests.helpers import ExtendedTestResult
-
-
-class TestRunTest(TestCase):
-
- def make_case(self):
- class Case(TestCase):
- def test(self):
- pass
- return Case('test')
-
- def test___init___short(self):
- run = RunTest("bar")
- self.assertEqual("bar", run.case)
- self.assertEqual([], run.handlers)
-
- def test__init____handlers(self):
- handlers = [("quux", "baz")]
- run = RunTest("bar", handlers)
- self.assertEqual(handlers, run.handlers)
-
- def test_run_with_result(self):
- # test.run passes result down to _run_test_method.
- log = []
- class Case(TestCase):
- def _run_test_method(self, result):
- log.append(result)
- case = Case('_run_test_method')
- run = RunTest(case, lambda x: log.append(x))
- result = TestResult()
- run.run(result)
- self.assertEqual(1, len(log))
- self.assertEqual(result, log[0].decorated)
-
- def test_run_no_result_manages_new_result(self):
- log = []
- run = RunTest(self.make_case(), lambda x: log.append(x) or x)
- result = run.run()
- self.assertIsInstance(result.decorated, TestResult)
-
- def test__run_core_called(self):
- case = self.make_case()
- log = []
- run = RunTest(case, lambda x: x)
- run._run_core = lambda: log.append('foo')
- run.run()
- self.assertEqual(['foo'], log)
-
- def test__run_user_does_not_catch_keyboard(self):
- case = self.make_case()
- def raises():
- raise KeyboardInterrupt("yo")
- run = RunTest(case, None)
- run.result = ExtendedTestResult()
- self.assertRaises(KeyboardInterrupt, run._run_user, raises)
- self.assertEqual([], run.result._events)
-
- def test__run_user_calls_onException(self):
- case = self.make_case()
- log = []
- def handler(exc_info):
- log.append("got it")
- self.assertEqual(3, len(exc_info))
- self.assertIsInstance(exc_info[1], KeyError)
- self.assertIs(KeyError, exc_info[0])
- case.addOnException(handler)
- e = KeyError('Yo')
- def raises():
- raise e
- def log_exc(self, result, err):
- log.append((result, err))
- run = RunTest(case, [(KeyError, log_exc)])
- run.result = ExtendedTestResult()
- status = run._run_user(raises)
- self.assertEqual(run.exception_caught, status)
- self.assertEqual([], run.result._events)
- self.assertEqual(["got it", (run.result, e)], log)
-
- def test__run_user_can_catch_Exception(self):
- case = self.make_case()
- e = Exception('Yo')
- def raises():
- raise e
- log = []
- def log_exc(self, result, err):
- log.append((result, err))
- run = RunTest(case, [(Exception, log_exc)])
- run.result = ExtendedTestResult()
- status = run._run_user(raises)
- self.assertEqual(run.exception_caught, status)
- self.assertEqual([], run.result._events)
- self.assertEqual([(run.result, e)], log)
-
- def test__run_user_uncaught_Exception_raised(self):
- case = self.make_case()
- e = KeyError('Yo')
- def raises():
- raise e
- log = []
- def log_exc(self, result, err):
- log.append((result, err))
- run = RunTest(case, [(ValueError, log_exc)])
- run.result = ExtendedTestResult()
- self.assertRaises(KeyError, run._run_user, raises)
- self.assertEqual([], run.result._events)
- self.assertEqual([], log)
-
- def test__run_user_uncaught_Exception_from_exception_handler_raised(self):
- case = self.make_case()
- def broken_handler(exc_info):
-            # ValueError because that's what we know how to catch - and must
- # not.
- raise ValueError('boo')
- case.addOnException(broken_handler)
- e = KeyError('Yo')
- def raises():
- raise e
- log = []
- def log_exc(self, result, err):
- log.append((result, err))
- run = RunTest(case, [(ValueError, log_exc)])
- run.result = ExtendedTestResult()
- self.assertRaises(ValueError, run._run_user, raises)
- self.assertEqual([], run.result._events)
- self.assertEqual([], log)
-
- def test__run_user_returns_result(self):
- case = self.make_case()
- def returns():
- return 1
- run = RunTest(case)
- run.result = ExtendedTestResult()
- self.assertEqual(1, run._run_user(returns))
- self.assertEqual([], run.result._events)
-
- def test__run_one_decorates_result(self):
- log = []
- class Run(RunTest):
- def _run_prepared_result(self, result):
- log.append(result)
- return result
- run = Run(self.make_case(), lambda x: x)
- result = run._run_one('foo')
- self.assertEqual([result], log)
- self.assertIsInstance(log[0], ExtendedToOriginalDecorator)
- self.assertEqual('foo', result.decorated)
-
- def test__run_prepared_result_calls_start_and_stop_test(self):
- result = ExtendedTestResult()
- case = self.make_case()
- run = RunTest(case, lambda x: x)
- run.run(result)
- self.assertEqual([
- ('startTest', case),
- ('addSuccess', case),
- ('stopTest', case),
- ], result._events)
-
- def test__run_prepared_result_calls_stop_test_always(self):
- result = ExtendedTestResult()
- case = self.make_case()
- def inner():
- raise Exception("foo")
- run = RunTest(case, lambda x: x)
- run._run_core = inner
- self.assertRaises(Exception, run.run, result)
- self.assertEqual([
- ('startTest', case),
- ('stopTest', case),
- ], result._events)
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/subunit/python/testtools/tests/test_testresult.py b/lib/subunit/python/testtools/tests/test_testresult.py
deleted file mode 100644
index df15b91244..0000000000
--- a/lib/subunit/python/testtools/tests/test_testresult.py
+++ /dev/null
@@ -1,807 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-"""Test TestResults and related things."""
-
-__metaclass__ = type
-
-import datetime
-try:
- from cStringIO import StringIO
-except ImportError:
- from io import StringIO
-import doctest
-import sys
-import threading
-
-from testtools import (
- ExtendedToOriginalDecorator,
- MultiTestResult,
- TestCase,
- TestResult,
- TextTestResult,
- ThreadsafeForwardingResult,
- testresult,
- )
-from testtools.content import Content, ContentType
-from testtools.matchers import DocTestMatches
-from testtools.utils import _u, _b
-from testtools.tests.helpers import (
- LoggingResult,
- Python26TestResult,
- Python27TestResult,
- ExtendedTestResult,
- an_exc_info
- )
-
-
-class TestTestResultContract(TestCase):
- """Tests for the contract of TestResults."""
-
- def test_addExpectedFailure(self):
- # Calling addExpectedFailure(test, exc_info) completes ok.
- result = self.makeResult()
- result.addExpectedFailure(self, an_exc_info)
-
- def test_addExpectedFailure_details(self):
- # Calling addExpectedFailure(test, details=xxx) completes ok.
- result = self.makeResult()
- result.addExpectedFailure(self, details={})
-
- def test_addError_details(self):
- # Calling addError(test, details=xxx) completes ok.
- result = self.makeResult()
- result.addError(self, details={})
-
- def test_addFailure_details(self):
- # Calling addFailure(test, details=xxx) completes ok.
- result = self.makeResult()
- result.addFailure(self, details={})
-
- def test_addSkipped(self):
- # Calling addSkip(test, reason) completes ok.
- result = self.makeResult()
- result.addSkip(self, _u("Skipped for some reason"))
-
- def test_addSkipped_details(self):
- # Calling addSkip(test, reason) completes ok.
- result = self.makeResult()
- result.addSkip(self, details={})
-
- def test_addUnexpectedSuccess(self):
- # Calling addUnexpectedSuccess(test) completes ok.
- result = self.makeResult()
- result.addUnexpectedSuccess(self)
-
- def test_addUnexpectedSuccess_details(self):
- # Calling addUnexpectedSuccess(test) completes ok.
- result = self.makeResult()
- result.addUnexpectedSuccess(self, details={})
-
- def test_addSuccess_details(self):
- # Calling addSuccess(test) completes ok.
- result = self.makeResult()
- result.addSuccess(self, details={})
-
- def test_startStopTestRun(self):
- # Calling startTestRun completes ok.
- result = self.makeResult()
- result.startTestRun()
- result.stopTestRun()
-
-
-class TestTestResultContract(TestTestResultContract):
-
- def makeResult(self):
- return TestResult()
-
-
-class TestMultiTestResultContract(TestTestResultContract):
-
- def makeResult(self):
- return MultiTestResult(TestResult(), TestResult())
-
-
-class TestTextTestResultContract(TestTestResultContract):
-
- def makeResult(self):
- return TextTestResult(StringIO())
-
-
-class TestThreadSafeForwardingResultContract(TestTestResultContract):
-
- def makeResult(self):
- result_semaphore = threading.Semaphore(1)
- target = TestResult()
- return ThreadsafeForwardingResult(target, result_semaphore)
-
-
-class TestTestResult(TestCase):
- """Tests for `TestResult`."""
-
- def makeResult(self):
- """Make an arbitrary result for testing."""
- return TestResult()
-
- def test_addSkipped(self):
- # Calling addSkip on a TestResult records the test that was skipped in
- # its skip_reasons dict.
- result = self.makeResult()
- result.addSkip(self, _u("Skipped for some reason"))
- self.assertEqual({_u("Skipped for some reason"):[self]},
- result.skip_reasons)
- result.addSkip(self, _u("Skipped for some reason"))
- self.assertEqual({_u("Skipped for some reason"):[self, self]},
- result.skip_reasons)
- result.addSkip(self, _u("Skipped for another reason"))
- self.assertEqual({_u("Skipped for some reason"):[self, self],
- _u("Skipped for another reason"):[self]},
- result.skip_reasons)
-
- def test_now_datetime_now(self):
- result = self.makeResult()
- olddatetime = testresult.real.datetime
- def restore():
- testresult.real.datetime = olddatetime
- self.addCleanup(restore)
- class Module:
- pass
- now = datetime.datetime.now()
- stubdatetime = Module()
- stubdatetime.datetime = Module()
- stubdatetime.datetime.now = lambda: now
- testresult.real.datetime = stubdatetime
- # Calling _now() looks up the time.
- self.assertEqual(now, result._now())
- then = now + datetime.timedelta(0, 1)
- # Set an explicit datetime, which gets returned from then on.
- result.time(then)
- self.assertNotEqual(now, result._now())
- self.assertEqual(then, result._now())
- # go back to looking it up.
- result.time(None)
- self.assertEqual(now, result._now())
-
- def test_now_datetime_time(self):
- result = self.makeResult()
- now = datetime.datetime.now()
- result.time(now)
- self.assertEqual(now, result._now())
-
-
-class TestWithFakeExceptions(TestCase):
-
- def makeExceptionInfo(self, exceptionFactory, *args, **kwargs):
- try:
- raise exceptionFactory(*args, **kwargs)
- except:
- return sys.exc_info()
-
-
-class TestMultiTestResult(TestWithFakeExceptions):
- """Tests for `MultiTestResult`."""
-
- def setUp(self):
- TestWithFakeExceptions.setUp(self)
- self.result1 = LoggingResult([])
- self.result2 = LoggingResult([])
- self.multiResult = MultiTestResult(self.result1, self.result2)
-
- def assertResultLogsEqual(self, expectedEvents):
- """Assert that our test results have received the expected events."""
- self.assertEqual(expectedEvents, self.result1._events)
- self.assertEqual(expectedEvents, self.result2._events)
-
- def test_empty(self):
- # Initializing a `MultiTestResult` doesn't do anything to its
- # `TestResult`s.
- self.assertResultLogsEqual([])
-
- def test_startTest(self):
- # Calling `startTest` on a `MultiTestResult` calls `startTest` on all
- # its `TestResult`s.
- self.multiResult.startTest(self)
- self.assertResultLogsEqual([('startTest', self)])
-
- def test_stopTest(self):
- # Calling `stopTest` on a `MultiTestResult` calls `stopTest` on all
- # its `TestResult`s.
- self.multiResult.stopTest(self)
- self.assertResultLogsEqual([('stopTest', self)])
-
- def test_addSkipped(self):
- # Calling `addSkip` on a `MultiTestResult` calls addSkip on its
- # results.
- reason = _u("Skipped for some reason")
- self.multiResult.addSkip(self, reason)
- self.assertResultLogsEqual([('addSkip', self, reason)])
-
- def test_addSuccess(self):
- # Calling `addSuccess` on a `MultiTestResult` calls `addSuccess` on
- # all its `TestResult`s.
- self.multiResult.addSuccess(self)
- self.assertResultLogsEqual([('addSuccess', self)])
-
- def test_done(self):
- # Calling `done` on a `MultiTestResult` calls `done` on all its
- # `TestResult`s.
- self.multiResult.done()
-        self.assertResultLogsEqual(['done'])
-
- def test_addFailure(self):
- # Calling `addFailure` on a `MultiTestResult` calls `addFailure` on
- # all its `TestResult`s.
- exc_info = self.makeExceptionInfo(AssertionError, 'failure')
- self.multiResult.addFailure(self, exc_info)
- self.assertResultLogsEqual([('addFailure', self, exc_info)])
-
- def test_addError(self):
- # Calling `addError` on a `MultiTestResult` calls `addError` on all
- # its `TestResult`s.
- exc_info = self.makeExceptionInfo(RuntimeError, 'error')
- self.multiResult.addError(self, exc_info)
- self.assertResultLogsEqual([('addError', self, exc_info)])
-
- def test_startTestRun(self):
- # Calling `startTestRun` on a `MultiTestResult` forwards to all its
- # `TestResult`s.
- self.multiResult.startTestRun()
-        self.assertResultLogsEqual(['startTestRun'])
-
- def test_stopTestRun(self):
- # Calling `stopTestRun` on a `MultiTestResult` forwards to all its
- # `TestResult`s.
- self.multiResult.stopTestRun()
-        self.assertResultLogsEqual(['stopTestRun'])
-
-
-class TestTextTestResult(TestWithFakeExceptions):
- """Tests for `TextTestResult`."""
-
- def setUp(self):
- super(TestTextTestResult, self).setUp()
- self.result = TextTestResult(StringIO())
-
- def make_erroring_test(self):
- class Test(TestCase):
- def error(self):
- 1/0
- return Test("error")
-
- def make_failing_test(self):
- class Test(TestCase):
- def failed(self):
- self.fail("yo!")
- return Test("failed")
-
- def make_test(self):
- class Test(TestCase):
- def test(self):
- pass
- return Test("test")
-
- def getvalue(self):
- return self.result.stream.getvalue()
-
- def test__init_sets_stream(self):
- result = TextTestResult("fp")
- self.assertEqual("fp", result.stream)
-
- def reset_output(self):
- self.result.stream = StringIO()
-
- def test_startTestRun(self):
- self.result.startTestRun()
- self.assertEqual("Tests running...\n", self.getvalue())
-
- def test_stopTestRun_count_many(self):
- test = self.make_test()
- self.result.startTestRun()
- self.result.startTest(test)
- self.result.stopTest(test)
- self.result.startTest(test)
- self.result.stopTest(test)
- self.result.stream = StringIO()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("Ran 2 tests in ...s\n...", doctest.ELLIPSIS))
-
- def test_stopTestRun_count_single(self):
- test = self.make_test()
- self.result.startTestRun()
- self.result.startTest(test)
- self.result.stopTest(test)
- self.reset_output()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("Ran 1 test in ...s\n\nOK\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_count_zero(self):
- self.result.startTestRun()
- self.reset_output()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("Ran 0 tests in ...s\n\nOK\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_current_time(self):
- test = self.make_test()
- now = datetime.datetime.now()
- self.result.time(now)
- self.result.startTestRun()
- self.result.startTest(test)
- now = now + datetime.timedelta(0, 0, 0, 1)
- self.result.time(now)
- self.result.stopTest(test)
- self.reset_output()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("... in 0.001s\n...", doctest.ELLIPSIS))
-
- def test_stopTestRun_successful(self):
- self.result.startTestRun()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("...\n\nOK\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_not_successful_failure(self):
- test = self.make_failing_test()
- self.result.startTestRun()
- test.run(self.result)
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_not_successful_error(self):
- test = self.make_erroring_test()
- self.result.startTestRun()
- test.run(self.result)
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("...\n\nFAILED (failures=1)\n", doctest.ELLIPSIS))
-
- def test_stopTestRun_shows_details(self):
- self.result.startTestRun()
- self.make_erroring_test().run(self.result)
- self.make_failing_test().run(self.result)
- self.reset_output()
- self.result.stopTestRun()
- self.assertThat(self.getvalue(),
- DocTestMatches("""...======================================================================
-ERROR: testtools.tests.test_testresult.Test.error
-----------------------------------------------------------------------
-Text attachment: traceback
-------------
-Traceback (most recent call last):
- File "...testtools...runtest.py", line ..., in _run_user...
- return fn(*args)
- File "...testtools...testcase.py", line ..., in _run_test_method
- testMethod()
- File "...testtools...tests...test_testresult.py", line ..., in error
- 1/0
-ZeroDivisionError: int... division or modulo by zero
-------------
-======================================================================
-FAIL: testtools.tests.test_testresult.Test.failed
-----------------------------------------------------------------------
-Text attachment: traceback
-------------
-Traceback (most recent call last):
- File "...testtools...runtest.py", line ..., in _run_user...
- return fn(*args)
- File "...testtools...testcase.py", line ..., in _run_test_method
- testMethod()
- File "...testtools...tests...test_testresult.py", line ..., in failed
- self.fail("yo!")
-AssertionError: yo!
-------------
-...""", doctest.ELLIPSIS))
-
-
-class TestThreadSafeForwardingResult(TestWithFakeExceptions):
- """Tests for `MultiTestResult`."""
-
- def setUp(self):
- TestWithFakeExceptions.setUp(self)
- self.result_semaphore = threading.Semaphore(1)
- self.target = LoggingResult([])
- self.result1 = ThreadsafeForwardingResult(self.target,
- self.result_semaphore)
-
- def test_nonforwarding_methods(self):
- # startTest and stopTest are not forwarded because they need to be
- # batched.
- self.result1.startTest(self)
- self.result1.stopTest(self)
- self.assertEqual([], self.target._events)
-
- def test_startTestRun(self):
- self.result1.startTestRun()
- self.result2 = ThreadsafeForwardingResult(self.target,
- self.result_semaphore)
- self.result2.startTestRun()
- self.assertEqual(["startTestRun", "startTestRun"], self.target._events)
-
- def test_stopTestRun(self):
- self.result1.stopTestRun()
- self.result2 = ThreadsafeForwardingResult(self.target,
- self.result_semaphore)
- self.result2.stopTestRun()
- self.assertEqual(["stopTestRun", "stopTestRun"], self.target._events)
-
- def test_forwarding_methods(self):
- # error, failure, skip and success are forwarded in batches.
- exc_info1 = self.makeExceptionInfo(RuntimeError, 'error')
- self.result1.addError(self, exc_info1)
- exc_info2 = self.makeExceptionInfo(AssertionError, 'failure')
- self.result1.addFailure(self, exc_info2)
- reason = _u("Skipped for some reason")
- self.result1.addSkip(self, reason)
- self.result1.addSuccess(self)
- self.assertEqual([('startTest', self),
- ('addError', self, exc_info1),
- ('stopTest', self),
- ('startTest', self),
- ('addFailure', self, exc_info2),
- ('stopTest', self),
- ('startTest', self),
- ('addSkip', self, reason),
- ('stopTest', self),
- ('startTest', self),
- ('addSuccess', self),
- ('stopTest', self),
- ], self.target._events)
-
-
-class TestExtendedToOriginalResultDecoratorBase(TestCase):
-
- def make_26_result(self):
- self.result = Python26TestResult()
- self.make_converter()
-
- def make_27_result(self):
- self.result = Python27TestResult()
- self.make_converter()
-
- def make_converter(self):
- self.converter = ExtendedToOriginalDecorator(self.result)
-
- def make_extended_result(self):
- self.result = ExtendedTestResult()
- self.make_converter()
-
- def check_outcome_details(self, outcome):
- """Call an outcome with a details dict to be passed through."""
-        # This dict is /not/ convertible - that's deliberate, as it should
- # not hit the conversion code path.
- details = {'foo': 'bar'}
- getattr(self.converter, outcome)(self, details=details)
- self.assertEqual([(outcome, self, details)], self.result._events)
-
- def get_details_and_string(self):
- """Get a details dict and expected string."""
- text1 = lambda: [_b("1\n2\n")]
- text2 = lambda: [_b("3\n4\n")]
- bin1 = lambda: [_b("5\n")]
- details = {'text 1': Content(ContentType('text', 'plain'), text1),
- 'text 2': Content(ContentType('text', 'strange'), text2),
- 'bin 1': Content(ContentType('application', 'binary'), bin1)}
- return (details, "Binary content: bin 1\n"
- "Text attachment: text 1\n------------\n1\n2\n"
- "------------\nText attachment: text 2\n------------\n"
- "3\n4\n------------\n")
-
- def check_outcome_details_to_exec_info(self, outcome, expected=None):
- """Call an outcome with a details dict to be made into exc_info."""
-        # The conversion is done using RemoteError and the string contents
- # of the text types in the details dict.
- if not expected:
- expected = outcome
- details, err_str = self.get_details_and_string()
- getattr(self.converter, outcome)(self, details=details)
- err = self.converter._details_to_exc_info(details)
- self.assertEqual([(expected, self, err)], self.result._events)
-
- def check_outcome_details_to_nothing(self, outcome, expected=None):
- """Call an outcome with a details dict to be swallowed."""
- if not expected:
- expected = outcome
- details = {'foo': 'bar'}
- getattr(self.converter, outcome)(self, details=details)
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_details_to_string(self, outcome):
- """Call an outcome with a details dict to be stringified."""
- details, err_str = self.get_details_and_string()
- getattr(self.converter, outcome)(self, details=details)
- self.assertEqual([(outcome, self, err_str)], self.result._events)
-
- def check_outcome_exc_info(self, outcome, expected=None):
- """Check that calling a legacy outcome still works."""
- # calling some outcome with the legacy exc_info style api (no keyword
- # parameters) gets passed through.
- if not expected:
- expected = outcome
- err = sys.exc_info()
- getattr(self.converter, outcome)(self, err)
- self.assertEqual([(expected, self, err)], self.result._events)
-
- def check_outcome_exc_info_to_nothing(self, outcome, expected=None):
- """Check that calling a legacy outcome on a fallback works."""
-        # calling some outcome with the legacy exc_info style api (no keyword
-        # parameters) on a result without details support drops the exc_info.
- if not expected:
- expected = outcome
- err = sys.exc_info()
- getattr(self.converter, outcome)(self, err)
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_nothing(self, outcome, expected=None):
- """Check that calling a legacy outcome still works."""
- if not expected:
- expected = outcome
- getattr(self.converter, outcome)(self)
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_string_nothing(self, outcome, expected):
- """Check that calling outcome with a string calls expected."""
- getattr(self.converter, outcome)(self, "foo")
- self.assertEqual([(expected, self)], self.result._events)
-
- def check_outcome_string(self, outcome):
- """Check that calling outcome with a string works."""
- getattr(self.converter, outcome)(self, "foo")
- self.assertEqual([(outcome, self, "foo")], self.result._events)
-
-
-class TestExtendedToOriginalResultDecorator(
- TestExtendedToOriginalResultDecoratorBase):
-
- def test_progress_py26(self):
- self.make_26_result()
- self.converter.progress(1, 2)
-
- def test_progress_py27(self):
- self.make_27_result()
- self.converter.progress(1, 2)
-
- def test_progress_pyextended(self):
- self.make_extended_result()
- self.converter.progress(1, 2)
- self.assertEqual([('progress', 1, 2)], self.result._events)
-
- def test_shouldStop(self):
- self.make_26_result()
- self.assertEqual(False, self.converter.shouldStop)
- self.converter.decorated.stop()
- self.assertEqual(True, self.converter.shouldStop)
-
- def test_startTest_py26(self):
- self.make_26_result()
- self.converter.startTest(self)
- self.assertEqual([('startTest', self)], self.result._events)
-
- def test_startTest_py27(self):
- self.make_27_result()
- self.converter.startTest(self)
- self.assertEqual([('startTest', self)], self.result._events)
-
- def test_startTest_pyextended(self):
- self.make_extended_result()
- self.converter.startTest(self)
- self.assertEqual([('startTest', self)], self.result._events)
-
- def test_startTestRun_py26(self):
- self.make_26_result()
- self.converter.startTestRun()
- self.assertEqual([], self.result._events)
-
- def test_startTestRun_py27(self):
- self.make_27_result()
- self.converter.startTestRun()
- self.assertEqual([('startTestRun',)], self.result._events)
-
- def test_startTestRun_pyextended(self):
- self.make_extended_result()
- self.converter.startTestRun()
- self.assertEqual([('startTestRun',)], self.result._events)
-
- def test_stopTest_py26(self):
- self.make_26_result()
- self.converter.stopTest(self)
- self.assertEqual([('stopTest', self)], self.result._events)
-
- def test_stopTest_py27(self):
- self.make_27_result()
- self.converter.stopTest(self)
- self.assertEqual([('stopTest', self)], self.result._events)
-
- def test_stopTest_pyextended(self):
- self.make_extended_result()
- self.converter.stopTest(self)
- self.assertEqual([('stopTest', self)], self.result._events)
-
- def test_stopTestRun_py26(self):
- self.make_26_result()
- self.converter.stopTestRun()
- self.assertEqual([], self.result._events)
-
- def test_stopTestRun_py27(self):
- self.make_27_result()
- self.converter.stopTestRun()
- self.assertEqual([('stopTestRun',)], self.result._events)
-
- def test_stopTestRun_pyextended(self):
- self.make_extended_result()
- self.converter.stopTestRun()
- self.assertEqual([('stopTestRun',)], self.result._events)
-
- def test_tags_py26(self):
- self.make_26_result()
- self.converter.tags(1, 2)
-
- def test_tags_py27(self):
- self.make_27_result()
- self.converter.tags(1, 2)
-
- def test_tags_pyextended(self):
- self.make_extended_result()
- self.converter.tags(1, 2)
- self.assertEqual([('tags', 1, 2)], self.result._events)
-
- def test_time_py26(self):
- self.make_26_result()
- self.converter.time(1)
-
- def test_time_py27(self):
- self.make_27_result()
- self.converter.time(1)
-
- def test_time_pyextended(self):
- self.make_extended_result()
- self.converter.time(1)
- self.assertEqual([('time', 1)], self.result._events)
-
-
-class TestExtendedToOriginalAddError(TestExtendedToOriginalResultDecoratorBase):
-
- outcome = 'addError'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_exc_info(self.outcome)
-
- def test_outcome_Original_py27(self):
- self.make_27_result()
- self.check_outcome_exc_info(self.outcome)
-
- def test_outcome_Original_pyextended(self):
- self.make_extended_result()
- self.check_outcome_exc_info(self.outcome)
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_details_to_exec_info(self.outcome)
-
- def test_outcome_Extended_py27(self):
- self.make_27_result()
- self.check_outcome_details_to_exec_info(self.outcome)
-
- def test_outcome_Extended_pyextended(self):
- self.make_extended_result()
- self.check_outcome_details(self.outcome)
-
- def test_outcome__no_details(self):
- self.make_extended_result()
- self.assertRaises(ValueError,
- getattr(self.converter, self.outcome), self)
-
-
-class TestExtendedToOriginalAddFailure(
- TestExtendedToOriginalAddError):
-
- outcome = 'addFailure'
-
-
-class TestExtendedToOriginalAddExpectedFailure(
- TestExtendedToOriginalAddError):
-
- outcome = 'addExpectedFailure'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_exc_info_to_nothing(self.outcome, 'addSuccess')
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_details_to_nothing(self.outcome, 'addSuccess')
-
-
-
-class TestExtendedToOriginalAddSkip(
- TestExtendedToOriginalResultDecoratorBase):
-
- outcome = 'addSkip'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_string_nothing(self.outcome, 'addSuccess')
-
- def test_outcome_Original_py27(self):
- self.make_27_result()
- self.check_outcome_string(self.outcome)
-
- def test_outcome_Original_pyextended(self):
- self.make_extended_result()
- self.check_outcome_string(self.outcome)
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_string_nothing(self.outcome, 'addSuccess')
-
- def test_outcome_Extended_py27(self):
- self.make_27_result()
- self.check_outcome_details_to_string(self.outcome)
-
- def test_outcome_Extended_pyextended(self):
- self.make_extended_result()
- self.check_outcome_details(self.outcome)
-
- def test_outcome__no_details(self):
- self.make_extended_result()
- self.assertRaises(ValueError,
- getattr(self.converter, self.outcome), self)
-
-
-class TestExtendedToOriginalAddSuccess(
- TestExtendedToOriginalResultDecoratorBase):
-
- outcome = 'addSuccess'
- expected = 'addSuccess'
-
- def test_outcome_Original_py26(self):
- self.make_26_result()
- self.check_outcome_nothing(self.outcome, self.expected)
-
- def test_outcome_Original_py27(self):
- self.make_27_result()
- self.check_outcome_nothing(self.outcome)
-
- def test_outcome_Original_pyextended(self):
- self.make_extended_result()
- self.check_outcome_nothing(self.outcome)
-
- def test_outcome_Extended_py26(self):
- self.make_26_result()
- self.check_outcome_details_to_nothing(self.outcome, self.expected)
-
- def test_outcome_Extended_py27(self):
- self.make_27_result()
- self.check_outcome_details_to_nothing(self.outcome)
-
- def test_outcome_Extended_pyextended(self):
- self.make_extended_result()
- self.check_outcome_details(self.outcome)
-
-
-class TestExtendedToOriginalAddUnexpectedSuccess(
- TestExtendedToOriginalAddSuccess):
-
- outcome = 'addUnexpectedSuccess'
-
-
-class TestExtendedToOriginalResultOtherAttributes(
- TestExtendedToOriginalResultDecoratorBase):
-
- def test_other_attribute(self):
- class OtherExtendedResult:
- def foo(self):
- return 2
- bar = 1
- self.result = OtherExtendedResult()
- self.make_converter()
- self.assertEqual(1, self.converter.bar)
- self.assertEqual(2, self.converter.foo())
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
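
The removed tests above pin down ThreadsafeForwardingResult's batching
contract: startTest/stopTest alone are not forwarded, and each outcome is
delivered to the wrapped result as an atomic startTest/outcome/stopTest
group. A minimal sketch of that behaviour, assuming only the public name
used in this diff (testtools.ThreadsafeForwardingResult); ListResult and
Sample are illustrative stand-ins for the LoggingResult helper:

import threading
import unittest

import testtools

class ListResult(unittest.TestResult):
    # Stand-in for the LoggingResult helper: record calls in order.
    def __init__(self, events):
        unittest.TestResult.__init__(self)
        self.events = events
    def startTest(self, test):
        self.events.append('startTest')
        unittest.TestResult.startTest(self, test)
    def addSuccess(self, test):
        self.events.append('addSuccess')
        unittest.TestResult.addSuccess(self, test)
    def stopTest(self, test):
        self.events.append('stopTest')
        unittest.TestResult.stopTest(self, test)

class Sample(unittest.TestCase):
    def test_nothing(self):
        pass

events = []
forwarder = testtools.ThreadsafeForwardingResult(
    ListResult(events), threading.Semaphore(1))
test = Sample('test_nothing')
forwarder.startTest(test)
forwarder.stopTest(test)
assert events == []  # start/stop are withheld until an outcome arrives
forwarder.addSuccess(test)
assert events == ['startTest', 'addSuccess', 'stopTest']
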
diff --git a/lib/subunit/python/testtools/tests/test_testsuite.py b/lib/subunit/python/testtools/tests/test_testsuite.py
deleted file mode 100644
index 3f2f02758f..0000000000
--- a/lib/subunit/python/testtools/tests/test_testsuite.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Test ConcurrentTestSuite and related things."""
-
-__metaclass__ = type
-
-import unittest
-
-from testtools import (
- ConcurrentTestSuite,
- iterate_tests,
- TestCase,
- )
-from testtools.matchers import (
- Equals,
- )
-from testtools.tests.helpers import LoggingResult
-
-
-class TestConcurrentTestSuiteRun(TestCase):
-
- def test_trivial(self):
- log = []
- result = LoggingResult(log)
- class Sample(TestCase):
- def __hash__(self):
- return id(self)
-
- def test_method1(self):
- pass
- def test_method2(self):
- pass
- test1 = Sample('test_method1')
- test2 = Sample('test_method2')
- original_suite = unittest.TestSuite([test1, test2])
- suite = ConcurrentTestSuite(original_suite, self.split_suite)
- suite.run(result)
- test1 = log[0][1]
- test2 = log[-1][1]
- self.assertIsInstance(test1, Sample)
- self.assertIsInstance(test2, Sample)
- self.assertNotEqual(test1.id(), test2.id())
- # We expect the start/outcome/stop to be grouped
- expected = [('startTest', test1), ('addSuccess', test1),
- ('stopTest', test1), ('startTest', test2), ('addSuccess', test2),
- ('stopTest', test2)]
- self.assertThat(log, Equals(expected))
-
- def split_suite(self, suite):
- tests = list(iterate_tests(suite))
- return tests[0], tests[1]
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
diff --git a/lib/subunit/python/testtools/tests/test_testtools.py b/lib/subunit/python/testtools/tests/test_testtools.py
deleted file mode 100644
index af1fd794c3..0000000000
--- a/lib/subunit/python/testtools/tests/test_testtools.py
+++ /dev/null
@@ -1,755 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-"""Tests for extensions to the base test library."""
-
-import sys
-import unittest
-
-from testtools import (
- TestCase,
- clone_test_with_new_id,
- content,
- skip,
- skipIf,
- skipUnless,
- testcase,
- )
-from testtools.matchers import (
- Equals,
- )
-from testtools.tests.helpers import (
- an_exc_info,
- LoggingResult,
- Python26TestResult,
- Python27TestResult,
- ExtendedTestResult,
- )
-
-
-class TestEquality(TestCase):
- """Test `TestCase`'s equality implementation."""
-
- def test_identicalIsEqual(self):
-        # TestCases are equal if they are identical.
- self.assertEqual(self, self)
-
-    def test_nonIdenticalIsUnequal(self):
-        # TestCases are not equal if they are not identical.
- self.assertNotEqual(TestCase(methodName='run'),
- TestCase(methodName='skip'))
-
-
-class TestAssertions(TestCase):
- """Test assertions in TestCase."""
-
- def raiseError(self, exceptionFactory, *args, **kwargs):
- raise exceptionFactory(*args, **kwargs)
-
- def test_formatTypes_single(self):
- # Given a single class, _formatTypes returns the name.
- class Foo:
- pass
- self.assertEqual('Foo', self._formatTypes(Foo))
-
- def test_formatTypes_multiple(self):
- # Given multiple types, _formatTypes returns the names joined by
- # commas.
- class Foo:
- pass
- class Bar:
- pass
- self.assertEqual('Foo, Bar', self._formatTypes([Foo, Bar]))
-
- def test_assertRaises(self):
- # assertRaises asserts that a callable raises a particular exception.
- self.assertRaises(RuntimeError, self.raiseError, RuntimeError)
-
- def test_assertRaises_fails_when_no_error_raised(self):
- # assertRaises raises self.failureException when it's passed a
- # callable that raises no error.
- ret = ('orange', 42)
- try:
- self.assertRaises(RuntimeError, lambda: ret)
- except self.failureException:
- # We expected assertRaises to raise this exception.
- e = sys.exc_info()[1]
- self.assertEqual(
- '%s not raised, %r returned instead.'
- % (self._formatTypes(RuntimeError), ret), str(e))
- else:
- self.fail('Expected assertRaises to fail, but it did not.')
-
- def test_assertRaises_fails_when_different_error_raised(self):
- # assertRaises re-raises an exception that it didn't expect.
- self.assertRaises(
- ZeroDivisionError,
- self.assertRaises,
- RuntimeError, self.raiseError, ZeroDivisionError)
-
- def test_assertRaises_returns_the_raised_exception(self):
- # assertRaises returns the exception object that was raised. This is
- # useful for testing that exceptions have the right message.
-
- # This contraption stores the raised exception, so we can compare it
- # to the return value of assertRaises.
- raisedExceptions = []
- def raiseError():
- try:
- raise RuntimeError('Deliberate error')
- except RuntimeError:
- raisedExceptions.append(sys.exc_info()[1])
- raise
-
- exception = self.assertRaises(RuntimeError, raiseError)
- self.assertEqual(1, len(raisedExceptions))
- self.assertTrue(
- exception is raisedExceptions[0],
- "%r is not %r" % (exception, raisedExceptions[0]))
-
- def test_assertRaises_with_multiple_exceptions(self):
- # assertRaises((ExceptionOne, ExceptionTwo), function) asserts that
-        # function raises one of ExceptionOne or ExceptionTwo.
- expectedExceptions = (RuntimeError, ZeroDivisionError)
- self.assertRaises(
- expectedExceptions, self.raiseError, expectedExceptions[0])
- self.assertRaises(
- expectedExceptions, self.raiseError, expectedExceptions[1])
-
- def test_assertRaises_with_multiple_exceptions_failure_mode(self):
- # If assertRaises is called expecting one of a group of exceptions and
- # a callable that doesn't raise an exception, then fail with an
- # appropriate error message.
- expectedExceptions = (RuntimeError, ZeroDivisionError)
- failure = self.assertRaises(
- self.failureException,
- self.assertRaises, expectedExceptions, lambda: None)
- self.assertEqual(
- '%s not raised, None returned instead.'
- % self._formatTypes(expectedExceptions), str(failure))
-
- def assertFails(self, message, function, *args, **kwargs):
- """Assert that function raises a failure with the given message."""
- failure = self.assertRaises(
- self.failureException, function, *args, **kwargs)
- self.assertEqual(message, str(failure))
-
- def test_assertIn_success(self):
- # assertIn(needle, haystack) asserts that 'needle' is in 'haystack'.
- self.assertIn(3, range(10))
- self.assertIn('foo', 'foo bar baz')
- self.assertIn('foo', 'foo bar baz'.split())
-
- def test_assertIn_failure(self):
- # assertIn(needle, haystack) fails the test when 'needle' is not in
- # 'haystack'.
- self.assertFails('3 not in [0, 1, 2]', self.assertIn, 3, [0, 1, 2])
- self.assertFails(
- '%r not in %r' % ('qux', 'foo bar baz'),
- self.assertIn, 'qux', 'foo bar baz')
-
- def test_assertNotIn_success(self):
- # assertNotIn(needle, haystack) asserts that 'needle' is not in
- # 'haystack'.
- self.assertNotIn(3, [0, 1, 2])
- self.assertNotIn('qux', 'foo bar baz')
-
- def test_assertNotIn_failure(self):
- # assertNotIn(needle, haystack) fails the test when 'needle' is in
- # 'haystack'.
- self.assertFails('3 in [1, 2, 3]', self.assertNotIn, 3, [1, 2, 3])
- self.assertFails(
- '%r in %r' % ('foo', 'foo bar baz'),
- self.assertNotIn, 'foo', 'foo bar baz')
-
- def test_assertIsInstance(self):
- # assertIsInstance asserts that an object is an instance of a class.
-
- class Foo:
- """Simple class for testing assertIsInstance."""
-
- foo = Foo()
- self.assertIsInstance(foo, Foo)
-
- def test_assertIsInstance_multiple_classes(self):
- # assertIsInstance asserts that an object is an instance of one of a
- # group of classes.
-
- class Foo:
- """Simple class for testing assertIsInstance."""
-
- class Bar:
- """Another simple class for testing assertIsInstance."""
-
- foo = Foo()
- self.assertIsInstance(foo, (Foo, Bar))
- self.assertIsInstance(Bar(), (Foo, Bar))
-
- def test_assertIsInstance_failure(self):
- # assertIsInstance(obj, klass) fails the test when obj is not an
- # instance of klass.
-
- class Foo:
- """Simple class for testing assertIsInstance."""
-
- self.assertFails(
- '42 is not an instance of %s' % self._formatTypes(Foo),
- self.assertIsInstance, 42, Foo)
-
- def test_assertIsInstance_failure_multiple_classes(self):
- # assertIsInstance(obj, (klass1, klass2)) fails the test when obj is
- # not an instance of klass1 or klass2.
-
- class Foo:
- """Simple class for testing assertIsInstance."""
-
- class Bar:
- """Another simple class for testing assertIsInstance."""
-
- self.assertFails(
- '42 is not an instance of %s' % self._formatTypes([Foo, Bar]),
- self.assertIsInstance, 42, (Foo, Bar))
-
- def test_assertIs(self):
- # assertIs asserts that an object is identical to another object.
- self.assertIs(None, None)
- some_list = [42]
- self.assertIs(some_list, some_list)
- some_object = object()
- self.assertIs(some_object, some_object)
-
- def test_assertIs_fails(self):
- # assertIs raises assertion errors if one object is not identical to
- # another.
- self.assertFails('None is not 42', self.assertIs, None, 42)
- self.assertFails('[42] is not [42]', self.assertIs, [42], [42])
-
- def test_assertIs_fails_with_message(self):
- # assertIs raises assertion errors if one object is not identical to
- # another, and includes a user-supplied message, if it's provided.
- self.assertFails(
- 'None is not 42: foo bar', self.assertIs, None, 42, 'foo bar')
-
- def test_assertIsNot(self):
- # assertIsNot asserts that an object is not identical to another
- # object.
- self.assertIsNot(None, 42)
- self.assertIsNot([42], [42])
- self.assertIsNot(object(), object())
-
- def test_assertIsNot_fails(self):
- # assertIsNot raises assertion errors if one object is identical to
- # another.
- self.assertFails('None is None', self.assertIsNot, None, None)
- some_list = [42]
- self.assertFails(
- '[42] is [42]', self.assertIsNot, some_list, some_list)
-
- def test_assertIsNot_fails_with_message(self):
- # assertIsNot raises assertion errors if one object is identical to
- # another, and includes a user-supplied message if it's provided.
- self.assertFails(
- 'None is None: foo bar', self.assertIsNot, None, None, "foo bar")
-
- def test_assertThat_matches_clean(self):
- class Matcher:
- def match(self, foo):
- return None
- self.assertThat("foo", Matcher())
-
- def test_assertThat_mismatch_raises_description(self):
- calls = []
- class Mismatch:
- def __init__(self, thing):
- self.thing = thing
- def describe(self):
- calls.append(('describe_diff', self.thing))
- return "object is not a thing"
- class Matcher:
- def match(self, thing):
- calls.append(('match', thing))
- return Mismatch(thing)
- def __str__(self):
- calls.append(('__str__',))
- return "a description"
- class Test(TestCase):
- def test(self):
- self.assertThat("foo", Matcher())
- result = Test("test").run()
- self.assertEqual([
- ('match', "foo"),
- ('describe_diff', "foo"),
- ('__str__',),
- ], calls)
- self.assertFalse(result.wasSuccessful())
-
-
-class TestAddCleanup(TestCase):
- """Tests for TestCase.addCleanup."""
-
- class LoggingTest(TestCase):
- """A test that logs calls to setUp, runTest and tearDown."""
-
- def setUp(self):
- TestCase.setUp(self)
- self._calls = ['setUp']
-
- def brokenSetUp(self):
-            # A setUp that deliberately fails.
- self._calls = ['brokenSetUp']
- raise RuntimeError('Deliberate Failure')
-
- def runTest(self):
- self._calls.append('runTest')
-
- def tearDown(self):
- self._calls.append('tearDown')
- TestCase.tearDown(self)
-
- def setUp(self):
- TestCase.setUp(self)
- self._result_calls = []
- self.test = TestAddCleanup.LoggingTest('runTest')
- self.logging_result = LoggingResult(self._result_calls)
-
- def assertErrorLogEqual(self, messages):
- self.assertEqual(messages, [call[0] for call in self._result_calls])
-
- def assertTestLogEqual(self, messages):
- """Assert that the call log equals 'messages'."""
- case = self._result_calls[0][1]
- self.assertEqual(messages, case._calls)
-
- def logAppender(self, message):
- """A cleanup that appends 'message' to the tests log.
-
- Cleanups are callables that are added to a test by addCleanup. To
- verify that our cleanups run in the right order, we add strings to a
- list that acts as a log. This method returns a cleanup that will add
- the given message to that log when run.
- """
- self.test._calls.append(message)
-
- def test_fixture(self):
- # A normal run of self.test logs 'setUp', 'runTest' and 'tearDown'.
-        # This test doesn't test addCleanup itself; it just sanity-checks
-        # the fixture.
- self.test.run(self.logging_result)
- self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
-
- def test_cleanup_run_before_tearDown(self):
- # Cleanup functions added with 'addCleanup' are called before tearDown
- # runs.
- self.test.addCleanup(self.logAppender, 'cleanup')
- self.test.run(self.logging_result)
- self.assertTestLogEqual(['setUp', 'runTest', 'tearDown', 'cleanup'])
-
- def test_add_cleanup_called_if_setUp_fails(self):
- # Cleanup functions added with 'addCleanup' are called even if setUp
- # fails. Note that tearDown has a different behavior: it is only
- # called when setUp succeeds.
- self.test.setUp = self.test.brokenSetUp
- self.test.addCleanup(self.logAppender, 'cleanup')
- self.test.run(self.logging_result)
- self.assertTestLogEqual(['brokenSetUp', 'cleanup'])
-
- def test_addCleanup_called_in_reverse_order(self):
- # Cleanup functions added with 'addCleanup' are called in reverse
- # order.
- #
- # One of the main uses of addCleanup is to dynamically create
- # resources that need some sort of explicit tearDown. Often one
- # resource will be created in terms of another, e.g.,
- # self.first = self.makeFirst()
- # self.second = self.makeSecond(self.first)
- #
- # When this happens, we generally want to clean up the second resource
- # before the first one, since the second depends on the first.
- self.test.addCleanup(self.logAppender, 'first')
- self.test.addCleanup(self.logAppender, 'second')
- self.test.run(self.logging_result)
- self.assertTestLogEqual(
- ['setUp', 'runTest', 'tearDown', 'second', 'first'])
-
- def test_tearDown_runs_after_cleanup_failure(self):
- # tearDown runs even if a cleanup function fails.
- self.test.addCleanup(lambda: 1/0)
- self.test.run(self.logging_result)
- self.assertTestLogEqual(['setUp', 'runTest', 'tearDown'])
-
- def test_cleanups_continue_running_after_error(self):
- # All cleanups are always run, even if one or two of them fail.
- self.test.addCleanup(self.logAppender, 'first')
- self.test.addCleanup(lambda: 1/0)
- self.test.addCleanup(self.logAppender, 'second')
- self.test.run(self.logging_result)
- self.assertTestLogEqual(
- ['setUp', 'runTest', 'tearDown', 'second', 'first'])
-
- def test_error_in_cleanups_are_captured(self):
-        # If a cleanup raises an error, we want to record it and fail the
- # test, even though we go on to run other cleanups.
- self.test.addCleanup(lambda: 1/0)
- self.test.run(self.logging_result)
- self.assertErrorLogEqual(['startTest', 'addError', 'stopTest'])
-
- def test_keyboard_interrupt_not_caught(self):
- # If a cleanup raises KeyboardInterrupt, it gets reraised.
- def raiseKeyboardInterrupt():
- raise KeyboardInterrupt()
- self.test.addCleanup(raiseKeyboardInterrupt)
- self.assertRaises(
- KeyboardInterrupt, self.test.run, self.logging_result)
-
- def test_multipleErrorsReported(self):
- # Errors from all failing cleanups are reported.
- self.test.addCleanup(lambda: 1/0)
- self.test.addCleanup(lambda: 1/0)
- self.test.run(self.logging_result)
- self.assertErrorLogEqual(
- ['startTest', 'addError', 'addError', 'stopTest'])
-
-
-class TestWithDetails(TestCase):
-
- def assertDetailsProvided(self, case, expected_outcome, expected_keys):
- """Assert that when case is run, details are provided to the result.
-
- :param case: A TestCase to run.
- :param expected_outcome: The call that should be made.
- :param expected_keys: The keys to look for.
- """
- result = ExtendedTestResult()
- case.run(result)
- case = result._events[0][1]
- expected = [
- ('startTest', case),
- (expected_outcome, case),
- ('stopTest', case),
- ]
- self.assertEqual(3, len(result._events))
- self.assertEqual(expected[0], result._events[0])
- self.assertEqual(expected[1], result._events[1][0:2])
- # Checking the TB is right is rather tricky. doctest line matching
- # would help, but 'meh'.
- self.assertEqual(sorted(expected_keys),
- sorted(result._events[1][2].keys()))
- self.assertEqual(expected[-1], result._events[-1])
-
- def get_content(self):
- return content.Content(
- content.ContentType("text", "foo"), lambda: ['foo'])
-
-
-class TestExpectedFailure(TestWithDetails):
- """Tests for expected failures and unexpected successess."""
-
- def make_unexpected_case(self):
- class Case(TestCase):
- def test(self):
- raise testcase._UnexpectedSuccess
- case = Case('test')
- return case
-
- def test_raising__UnexpectedSuccess_py27(self):
- case = self.make_unexpected_case()
- result = Python27TestResult()
- case.run(result)
- case = result._events[0][1]
- self.assertEqual([
- ('startTest', case),
- ('addUnexpectedSuccess', case),
- ('stopTest', case),
- ], result._events)
-
- def test_raising__UnexpectedSuccess_extended(self):
- case = self.make_unexpected_case()
- result = ExtendedTestResult()
- case.run(result)
- case = result._events[0][1]
- self.assertEqual([
- ('startTest', case),
- ('addUnexpectedSuccess', case, {}),
- ('stopTest', case),
- ], result._events)
-
- def make_xfail_case_xfails(self):
- content = self.get_content()
- class Case(TestCase):
- def test(self):
- self.addDetail("foo", content)
- self.expectFailure("we are sad", self.assertEqual,
- 1, 0)
- case = Case('test')
- return case
-
- def make_xfail_case_succeeds(self):
- content = self.get_content()
- class Case(TestCase):
- def test(self):
- self.addDetail("foo", content)
- self.expectFailure("we are sad", self.assertEqual,
- 1, 1)
- case = Case('test')
- return case
-
- def test_expectFailure_KnownFailure_extended(self):
- case = self.make_xfail_case_xfails()
- self.assertDetailsProvided(case, "addExpectedFailure",
- ["foo", "traceback", "reason"])
-
- def test_expectFailure_KnownFailure_unexpected_success(self):
- case = self.make_xfail_case_succeeds()
- self.assertDetailsProvided(case, "addUnexpectedSuccess",
- ["foo", "reason"])
-
-
-class TestUniqueFactories(TestCase):
- """Tests for getUniqueString and getUniqueInteger."""
-
- def test_getUniqueInteger(self):
- # getUniqueInteger returns an integer that increments each time you
- # call it.
- one = self.getUniqueInteger()
- self.assertEqual(1, one)
- two = self.getUniqueInteger()
- self.assertEqual(2, two)
-
- def test_getUniqueString(self):
- # getUniqueString returns the current test id followed by a unique
- # integer.
- name_one = self.getUniqueString()
- self.assertEqual('%s-%d' % (self.id(), 1), name_one)
- name_two = self.getUniqueString()
- self.assertEqual('%s-%d' % (self.id(), 2), name_two)
-
- def test_getUniqueString_prefix(self):
- # If getUniqueString is given an argument, it uses that argument as
- # the prefix of the unique string, rather than the test id.
- name_one = self.getUniqueString('foo')
- self.assertThat(name_one, Equals('foo-1'))
- name_two = self.getUniqueString('bar')
- self.assertThat(name_two, Equals('bar-2'))
-
-
-class TestCloneTestWithNewId(TestCase):
- """Tests for clone_test_with_new_id."""
-
- def test_clone_test_with_new_id(self):
- class FooTestCase(TestCase):
- def test_foo(self):
- pass
- test = FooTestCase('test_foo')
- oldName = test.id()
- newName = self.getUniqueString()
- newTest = clone_test_with_new_id(test, newName)
- self.assertEqual(newName, newTest.id())
- self.assertEqual(oldName, test.id(),
- "the original test instance should be unchanged.")
-
-
-class TestDetailsProvided(TestWithDetails):
-
- def test_addDetail(self):
- mycontent = self.get_content()
- self.addDetail("foo", mycontent)
- details = self.getDetails()
- self.assertEqual({"foo": mycontent}, details)
-
- def test_addError(self):
- class Case(TestCase):
- def test(this):
- this.addDetail("foo", self.get_content())
- 1/0
- self.assertDetailsProvided(Case("test"), "addError",
- ["foo", "traceback"])
-
- def test_addFailure(self):
- class Case(TestCase):
- def test(this):
- this.addDetail("foo", self.get_content())
- self.fail('yo')
- self.assertDetailsProvided(Case("test"), "addFailure",
- ["foo", "traceback"])
-
- def test_addSkip(self):
- class Case(TestCase):
- def test(this):
- this.addDetail("foo", self.get_content())
- self.skip('yo')
- self.assertDetailsProvided(Case("test"), "addSkip",
- ["foo", "reason"])
-
-    def test_addSuccess(self):
- class Case(TestCase):
- def test(this):
- this.addDetail("foo", self.get_content())
- self.assertDetailsProvided(Case("test"), "addSuccess",
- ["foo"])
-
- def test_addUnexpectedSuccess(self):
- class Case(TestCase):
- def test(this):
- this.addDetail("foo", self.get_content())
- raise testcase._UnexpectedSuccess()
- self.assertDetailsProvided(Case("test"), "addUnexpectedSuccess",
- ["foo"])
-
-
-class TestSetupTearDown(TestCase):
-
- def test_setUpNotCalled(self):
- class DoesnotcallsetUp(TestCase):
- def setUp(self):
- pass
- def test_method(self):
- pass
- result = unittest.TestResult()
- DoesnotcallsetUp('test_method').run(result)
- self.assertEqual(1, len(result.errors))
-
- def test_tearDownNotCalled(self):
- class DoesnotcalltearDown(TestCase):
- def test_method(self):
- pass
- def tearDown(self):
- pass
- result = unittest.TestResult()
- DoesnotcalltearDown('test_method').run(result)
- self.assertEqual(1, len(result.errors))
-
-
-class TestSkipping(TestCase):
- """Tests for skipping of tests functionality."""
-
- def test_skip_causes_skipException(self):
- self.assertRaises(self.skipException, self.skip, "Skip this test")
-
- def test_skip_without_reason_works(self):
- class Test(TestCase):
- def test(self):
- raise self.skipException()
- case = Test("test")
- result = ExtendedTestResult()
- case.run(result)
- self.assertEqual('addSkip', result._events[1][0])
- self.assertEqual('no reason given.',
- ''.join(result._events[1][2]['reason'].iter_text()))
-
- def test_skipException_in_setup_calls_result_addSkip(self):
- class TestThatRaisesInSetUp(TestCase):
- def setUp(self):
- TestCase.setUp(self)
- self.skip("skipping this test")
- def test_that_passes(self):
- pass
- calls = []
- result = LoggingResult(calls)
- test = TestThatRaisesInSetUp("test_that_passes")
- test.run(result)
- case = result._events[0][1]
- self.assertEqual([('startTest', case),
- ('addSkip', case, "Text attachment: reason\n------------\n"
- "skipping this test\n------------\n"), ('stopTest', case)],
- calls)
-
- def test_skipException_in_test_method_calls_result_addSkip(self):
- class SkippingTest(TestCase):
- def test_that_raises_skipException(self):
- self.skip("skipping this test")
- result = Python27TestResult()
- test = SkippingTest("test_that_raises_skipException")
- test.run(result)
- case = result._events[0][1]
- self.assertEqual([('startTest', case),
- ('addSkip', case, "Text attachment: reason\n------------\n"
- "skipping this test\n------------\n"), ('stopTest', case)],
- result._events)
-
- def test_skip__in_setup_with_old_result_object_calls_addSuccess(self):
- class SkippingTest(TestCase):
- def setUp(self):
- TestCase.setUp(self)
- raise self.skipException("skipping this test")
- def test_that_raises_skipException(self):
- pass
- result = Python26TestResult()
- test = SkippingTest("test_that_raises_skipException")
- test.run(result)
- self.assertEqual('addSuccess', result._events[1][0])
-
-    def test_skip_with_old_result_object_calls_addSuccess(self):
- class SkippingTest(TestCase):
- def test_that_raises_skipException(self):
- raise self.skipException("skipping this test")
- result = Python26TestResult()
- test = SkippingTest("test_that_raises_skipException")
- test.run(result)
- self.assertEqual('addSuccess', result._events[1][0])
-
- def test_skip_decorator(self):
- class SkippingTest(TestCase):
- @skip("skipping this test")
- def test_that_is_decorated_with_skip(self):
- self.fail()
- result = Python26TestResult()
- test = SkippingTest("test_that_is_decorated_with_skip")
- test.run(result)
- self.assertEqual('addSuccess', result._events[1][0])
-
- def test_skipIf_decorator(self):
- class SkippingTest(TestCase):
- @skipIf(True, "skipping this test")
- def test_that_is_decorated_with_skipIf(self):
- self.fail()
- result = Python26TestResult()
- test = SkippingTest("test_that_is_decorated_with_skipIf")
- test.run(result)
- self.assertEqual('addSuccess', result._events[1][0])
-
- def test_skipUnless_decorator(self):
- class SkippingTest(TestCase):
- @skipUnless(False, "skipping this test")
- def test_that_is_decorated_with_skipUnless(self):
- self.fail()
- result = Python26TestResult()
- test = SkippingTest("test_that_is_decorated_with_skipUnless")
- test.run(result)
- self.assertEqual('addSuccess', result._events[1][0])
-
-
-class TestOnException(TestCase):
-
- def test_default_works(self):
- events = []
- class Case(TestCase):
- def method(self):
- self.onException(an_exc_info)
- events.append(True)
- case = Case("method")
- case.run()
- self.assertThat(events, Equals([True]))
-
- def test_added_handler_works(self):
- events = []
- class Case(TestCase):
- def method(self):
- self.addOnException(events.append)
- self.onException(an_exc_info)
- case = Case("method")
- case.run()
- self.assertThat(events, Equals([an_exc_info]))
-
- def test_handler_that_raises_is_not_caught(self):
- events = []
- class Case(TestCase):
- def method(self):
- self.addOnException(events.index)
- self.assertRaises(ValueError, self.onException, an_exc_info)
- case = Case("method")
- case.run()
- self.assertThat(events, Equals([]))
-
-
-def test_suite():
- from unittest import TestLoader
- return TestLoader().loadTestsFromName(__name__)
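
The TestAddCleanup tests above encode the cleanup contract: cleanups run
after tearDown, in reverse order of addition, and still run when setUp
fails. A minimal sketch, assuming only testtools.TestCase as shipped in
this tree (CleanupOrder is an illustrative name):

import unittest

import testtools

log = []

class CleanupOrder(testtools.TestCase):
    def test_lifo(self):
        self.addCleanup(log.append, 'first added')
        self.addCleanup(log.append, 'second added')

CleanupOrder('test_lifo').run(unittest.TestResult())
# Reverse order: a resource built on top of another is torn down first.
assert log == ['second added', 'first added']
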
diff --git a/lib/subunit/python/testtools/testsuite.py b/lib/subunit/python/testtools/testsuite.py
deleted file mode 100644
index 26b193799b..0000000000
--- a/lib/subunit/python/testtools/testsuite.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright (c) 2009 Jonathan M. Lange. See LICENSE for details.
-
-"""Test suites and related things."""
-
-__metaclass__ = type
-__all__ = [
- 'ConcurrentTestSuite',
- ]
-
-try:
- import Queue
-except ImportError:
- import queue as Queue
-import threading
-import unittest
-
-import testtools
-
-
-class ConcurrentTestSuite(unittest.TestSuite):
- """A TestSuite whose run() calls out to a concurrency strategy."""
-
- def __init__(self, suite, make_tests):
- """Create a ConcurrentTestSuite to execute suite.
-
- :param suite: A suite to run concurrently.
- :param make_tests: A helper function to split the tests in the
- ConcurrentTestSuite into some number of concurrently executing
- sub-suites. make_tests must take a suite, and return an iterable
-            of TestCase-like objects, each of which must have a run(result)
- method.
- """
- super(ConcurrentTestSuite, self).__init__([suite])
- self.make_tests = make_tests
-
- def run(self, result):
- """Run the tests concurrently.
-
- This calls out to the provided make_tests helper, and then serialises
- the results so that result only sees activity from one TestCase at
- a time.
-
-        ConcurrentTestSuite provides no special mechanism to stop the tests
-        returned by make_tests; it is up to make_tests to honour the
-        shouldStop attribute on the result object they are run with, which
-        will be set if an exception is raised in the thread in which
-        ConcurrentTestSuite.run is called.
- """
- tests = self.make_tests(self)
- try:
- threads = {}
- queue = Queue.Queue()
- result_semaphore = threading.Semaphore(1)
- for test in tests:
- process_result = testtools.ThreadsafeForwardingResult(result,
- result_semaphore)
- reader_thread = threading.Thread(
- target=self._run_test, args=(test, process_result, queue))
- threads[test] = reader_thread, process_result
- reader_thread.start()
- while threads:
- finished_test = queue.get()
- threads[finished_test][0].join()
- del threads[finished_test]
- except:
- for thread, process_result in threads.values():
- process_result.stop()
- raise
-
- def _run_test(self, test, process_result, queue):
- try:
- test.run(process_result)
- finally:
- queue.put(test)
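
For reference, the usage pattern ConcurrentTestSuite is built for, mirroring
the removed test_testsuite.py test; only names from this diff are assumed
(Sample and split_suite are illustrative):

import unittest

import testtools
from testtools import ConcurrentTestSuite, iterate_tests

class Sample(testtools.TestCase):
    def __hash__(self):  # cases are used as dict keys by run(), as above
        return id(self)
    def test_a(self):
        pass
    def test_b(self):
        pass

def split_suite(suite):
    # One thread per test: make_tests may return any iterable of objects
    # with a run(result) method.
    return list(iterate_tests(suite))

suite = unittest.TestSuite([Sample('test_a'), Sample('test_b')])
result = unittest.TestResult()
ConcurrentTestSuite(suite, split_suite).run(result)
assert result.testsRun == 2 and result.wasSuccessful()
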
diff --git a/lib/subunit/python/testtools/utils.py b/lib/subunit/python/testtools/utils.py
deleted file mode 100644
index c0845b610c..0000000000
--- a/lib/subunit/python/testtools/utils.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# Copyright (c) 2008 Jonathan M. Lange. See LICENSE for details.
-
-"""Utilities for dealing with stuff in unittest."""
-
-
-import sys
-
-__metaclass__ = type
-__all__ = [
- 'iterate_tests',
- ]
-
-
-if sys.version_info > (3, 0):
- def _u(s):
- """Replacement for u'some string' in Python 3."""
- return s
- def _b(s):
- """A byte literal."""
- return s.encode("latin-1")
- advance_iterator = next
-else:
- def _u(s):
- return unicode(s, "latin-1")
- def _b(s):
- return s
- advance_iterator = lambda it: it.next()
-
-
-def iterate_tests(test_suite_or_case):
- """Iterate through all of the test cases in 'test_suite_or_case'."""
- try:
- suite = iter(test_suite_or_case)
- except TypeError:
- yield test_suite_or_case
- else:
- for test in suite:
- for subtest in iterate_tests(test):
- yield subtest
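
iterate_tests is the generator the suite-splitting code above relies on: it
flattens arbitrarily nested suites into leaf test cases. A minimal sketch,
assuming the testtools re-export used by test_testsuite.py (T is an
illustrative name):

import unittest

from testtools import iterate_tests

class T(unittest.TestCase):
    def test_one(self):
        pass
    def test_two(self):
        pass

nested = unittest.TestSuite(
    [unittest.TestSuite([T('test_one')]), T('test_two')])
# Suites are recursed into; only the leaf cases are yielded.
assert [type(t) for t in iterate_tests(nested)] == [T, T]
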
diff --git a/lib/subunit/runtests.py b/lib/subunit/runtests.py
new file mode 100755
index 0000000000..8ecc6cd3fb
--- /dev/null
+++ b/lib/subunit/runtests.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+# -*- Mode: python -*-
+#
+# Copyright (C) 2004 Canonical.com
+# Author: Robert Collins <robert.collins@canonical.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+#
+
+import unittest
+from subunit.tests.TestUtil import TestVisitor, TestSuite
+import subunit
+import sys
+import os
+import shutil
+import logging
+
+class ParameterisableTextTestRunner(unittest.TextTestRunner):
+ """I am a TextTestRunner whose result class is
+ parameterisable without further subclassing"""
+ def __init__(self, **args):
+ unittest.TextTestRunner.__init__(self, **args)
+ self._resultFactory=None
+ def resultFactory(self, *args):
+ """set or retrieve the result factory"""
+ if args:
+ self._resultFactory=args[0]
+ return self
+ if self._resultFactory is None:
+ self._resultFactory=unittest._TextTestResult
+ return self._resultFactory
+
+ def _makeResult(self):
+ return self.resultFactory()(self.stream, self.descriptions, self.verbosity)
+
+
+class EarlyStoppingTextTestResult(unittest._TextTestResult):
+ """I am a TextTestResult that can optionally stop at the first failure
+ or error"""
+
+ def addError(self, test, err):
+ unittest._TextTestResult.addError(self, test, err)
+ if self.stopOnError():
+ self.stop()
+
+ def addFailure(self, test, err):
+        unittest._TextTestResult.addFailure(self, test, err)
+ if self.stopOnFailure():
+ self.stop()
+
+ def stopOnError(self, *args):
+ """should this result indicate an abort when an error occurs?
+ TODO parameterise this"""
+ return True
+
+ def stopOnFailure(self, *args):
+ """should this result indicate an abort when a failure error occurs?
+ TODO parameterise this"""
+ return True
+
+
+def earlyStopFactory(*args, **kwargs):
+ """return a an early stopping text test result"""
+ result=EarlyStoppingTextTestResult(*args, **kwargs)
+ return result
+
+
+class ShellTests(subunit.ExecTestCase):
+
+ def test_sourcing(self):
+ """./shell/tests/test_source_library.sh"""
+
+ def test_functions(self):
+ """./shell/tests/test_function_output.sh"""
+
+
+def test_suite():
+ result = TestSuite()
+ result.addTest(subunit.test_suite())
+ result.addTest(ShellTests('test_sourcing'))
+ result.addTest(ShellTests('test_functions'))
+ return result
+
+
+class filteringVisitor(TestVisitor):
+ """I accrue all the testCases I visit that pass a regexp filter on id
+ into my suite
+ """
+
+ def __init__(self, filter):
+ import re
+ TestVisitor.__init__(self)
+ self._suite=None
+ self.filter=re.compile(filter)
+
+ def suite(self):
+ """answer the suite we are building"""
+ if self._suite is None:
+ self._suite=TestSuite()
+ return self._suite
+
+ def visitCase(self, aCase):
+ if self.filter.match(aCase.id()):
+ self.suite().addTest(aCase)
+
+
+def main(argv):
+ """To parameterise what tests are run, run this script like so:
+ python test_all.py REGEX
+ i.e.
+ python test_all.py .*Protocol.*
+ to run all tests with Protocol in their id."""
+ if len(argv) > 1:
+ pattern = argv[1]
+ else:
+ pattern = ".*"
+ visitor = filteringVisitor(pattern)
+ test_suite().visit(visitor)
+ runner = ParameterisableTextTestRunner(verbosity=2)
+ runner.resultFactory(unittest._TextTestResult)
+ if not runner.run(visitor.suite()).wasSuccessful():
+ return 1
+ return 0
+
+if __name__ == '__main__':
+ sys.exit(main(sys.argv))
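
The filteringVisitor/main pair above select tests whose id() matches a
regexp; from the command line that is, e.g., python runtests.py '.*Protocol.*'.
A sketch of the same selection done in-process, assuming this module's
namespace (the '.*Shell.*' pattern is illustrative):

# e.g. at a Python prompt, with this module imported as runtests:
visitor = runtests.filteringVisitor('.*Shell.*')
runtests.test_suite().visit(visitor)
runner = runtests.ParameterisableTextTestRunner(verbosity=2)
runner.resultFactory(runtests.earlyStopFactory)  # stop at first problem
runner.run(visitor.suite())
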
diff --git a/lib/subunit/shell/README b/lib/subunit/shell/README
new file mode 100644
index 0000000000..af894a2bd3
--- /dev/null
+++ b/lib/subunit/shell/README
@@ -0,0 +1,62 @@
+#
+# subunit shell bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+This tree contains shell bindings to the subunit protocol. They are written
+entirely in shell, and unit tested in shell. See the tests/ directory for the
+test scripts. You can use `make check` to run the tests. There is a trivial
+python test_shell.py which uses the pyunit gui to expose the test results in a
+compact form.
+
+The shell bindings consist of five functions which you can use to output test
+metadata trivially. See share/subunit.sh for the functions and comments.
+
+However, this is not a full test environment; it is support code for reporting
+to subunit. You can look at ShUnit (http://shunit.sourceforge.net) for 'proper'
+shell based xUnit functionality. There is a patch for ShUnit 1.3
+(subunit-ui.patch) in the subunit source tree. I hope to have that integrated
+upstream in the near future. I will delete the copy of the patch in the subunit
+tree a release or two later.
+
+If you are a test environment maintainer - whether homegrown, ShUnit or some
+such - you will need to see how the subunit calls should be used. Here is what
+a manually written test using the bindings might look like:
+
+
+subunit_start_test "test name"
+# determine if test passes or fails
+result=$(something)
+if [ $result == 0 ]; then
+ subunit_pass_test "test name"
+else
+ subunit_fail_test "test name" <<END
+Something went wrong running something:
+exited with result: '$result'
+END
+fi
+
+Which when run with a subunit test runner will generate something like:
+test name ... ok
+
+on success, and:
+
+test name ... FAIL
+
+======================================================================
+FAIL: test name
+----------------------------------------------------------------------
+RemoteError:
+Something went wrong running something:
+exited with result: '1'
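
On the wire, the failing branch of the example above produces a subunit
stream like this (markers exactly as emitted by share/subunit.sh; the
quoted '1' comes from $result):

test: test name
failure: test name [
Something went wrong running something:
exited with result: '1'
]
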
diff --git a/lib/subunit/shell/share/subunit.sh b/lib/subunit/shell/share/subunit.sh
new file mode 100644
index 0000000000..82737276b8
--- /dev/null
+++ b/lib/subunit/shell/share/subunit.sh
@@ -0,0 +1,56 @@
+#
+# subunit.sh: shell functions to report test status via the subunit protocol.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the users choice. A copy of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licences.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+subunit_start_test () {
+ # emit the current protocol start-marker for test $1
+ echo "test: $1"
+}
+
+
+subunit_pass_test () {
+ # emit the current protocol test passed marker for test $1
+ echo "success: $1"
+}
+
+
+subunit_fail_test () {
+ # emit the current protocol fail-marker for test $1, and emit stdin as
+ # the error text.
+ # we use stdin because the failure message can be arbitrarily long, and this
+    # makes it convenient to write in scripts (using <<END syntax).
+ echo "failure: $1 ["
+ cat -
+ echo "]"
+}
+
+
+subunit_error_test () {
+ # emit the current protocol error-marker for test $1, and emit stdin as
+ # the error text.
+    # we use stdin because the error message can be arbitrarily long, and this
+    # makes it convenient to write in scripts (using <<END syntax).
+ echo "error: $1 ["
+ cat -
+ echo "]"
+}
+
+
+subunit_skip_test () {
+ # emit the current protocol test skipped marker for test $1
+ echo "skip: $1"
+}
+
+
diff --git a/lib/subunit/shell/tests/test_function_output.sh b/lib/subunit/shell/tests/test_function_output.sh
new file mode 100755
index 0000000000..b78eee6946
--- /dev/null
+++ b/lib/subunit/shell/tests/test_function_output.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+# subunit shell bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. Copies of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licenses.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+
+# This script tests the output of the functions. As each function is tested,
+# we start using it: the first test manually implements the entire protocol,
+# the next uses the start function, and so on.
+# It is assumed that we are running from the 'shell' tree root in the subunit
+# source, and that the library sourcing tests have all passed - if they have
+# not, this test script may well fail strangely.
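+#
+# Each check below follows the same pattern (a sketch; subunit_xxx_test
+# stands for whichever function is under test):
+#
+#   func_output=$(subunit_xxx_test "foo bar")  # run in a subshell, keep output
+#   func_status=$?                             # keep the exit status
+#   # then compare both against the expected protocol line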
+
+# import the library.
+. ${SHELL_SHARE}subunit.sh
+
+echo 'test: subunit_start_test output'
+func_output=$(subunit_start_test "foo bar")
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xtest: foo bar" ]; then
+ echo 'success: subunit_start_test output'
+else
+ echo 'failure: subunit_start_test output ['
+ echo 'got an error code or incorrect output:'
+ echo "exit: $func_status"
+ echo "output: '$func_output'"
+ echo ']' ;
+fi
+
+subunit_start_test "subunit_pass_test output"
+func_output=$(subunit_pass_test "foo bar")
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xsuccess: foo bar" ]; then
+ subunit_pass_test "subunit_pass_test output"
+else
+ echo 'failure: subunit_pass_test output ['
+ echo 'got an error code or incorrect output:'
+ echo "exit: $func_status"
+ echo "output: '$func_output'"
+ echo ']' ;
+fi
+
+subunit_start_test "subunit_fail_test output"
+func_output=$(subunit_fail_test "foo bar" <<END
+something
+ wrong
+here
+END
+)
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xfailure: foo bar [
+something
+ wrong
+here
+]" ]; then
+ subunit_pass_test "subunit_fail_test output"
+else
+ echo 'failure: subunit_fail_test output ['
+ echo 'got an error code or incorrect output:'
+ echo "exit: $func_status"
+ echo "output: '$func_output'"
+ echo ']' ;
+fi
+
+subunit_start_test "subunit_error_test output"
+func_output=$(subunit_error_test "foo bar" <<END
+something
+ died
+here
+END
+)
+func_status=$?
+if [ $func_status == 0 -a "x$func_output" = "xerror: foo bar [
+something
+ died
+here
+]" ]; then
+ subunit_pass_test "subunit_error_test output"
+else
+ subunit_fail_test "subunit_error_test output" <<END
+got an error code or incorrect output:
+exit: $func_status
+output: '$func_output'
+END
+fi
diff --git a/lib/subunit/shell/tests/test_source_library.sh b/lib/subunit/shell/tests/test_source_library.sh
new file mode 100755
index 0000000000..699f1281bc
--- /dev/null
+++ b/lib/subunit/shell/tests/test_source_library.sh
@@ -0,0 +1,108 @@
+#!/bin/bash
+# subunit shell bindings.
+# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
+#
+# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
+# license at the user's choice. Copies of both licenses are available in the
+# project source as Apache-2.0 and BSD. You may not use this file except in
+# compliance with one of these two licenses.
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# license you chose for the specific language governing permissions and
+# limitations under that license.
+#
+
+
+# This script tests that we can source the subunit shell bindings
+# successfully. It manually implements the control protocol so that it does
+# not depend on the bindings being complete yet.
+
+# we expect to be run from the tree root.
+
+echo 'test: shell bindings can be sourced'
+# if any output occurs, this has failed to source cleanly
+source_output=$(. ${SHELL_SHARE}subunit.sh 2>&1)
+if [ $? == 0 -a "x$source_output" = "x" ]; then
+ echo 'success: shell bindings can be sourced'
+else
+ echo 'failure: shell bindings can be sourced ['
+ echo 'got an error code or output during sourcing:'
+ echo "$source_output"
+ echo ']' ;
+fi
+
+# now source it for real
+. ${SHELL_SHARE}subunit.sh
+
+# we should have a start_test function
+echo 'test: subunit_start_test exists'
+found_type=$(type -t subunit_start_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_start_test exists'
+else
+ echo 'failure: subunit_start_test exists ['
+ echo 'subunit_start_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have a pass_test function
+echo 'test: subunit_pass_test exists'
+found_type=$(type -t subunit_pass_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_pass_test exists'
+else
+ echo 'failure: subunit_pass_test exists ['
+ echo 'subunit_pass_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have a fail_test function
+echo 'test: subunit_fail_test exists'
+found_type=$(type -t subunit_fail_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_fail_test exists'
+else
+ echo 'failure: subunit_fail_test exists ['
+ echo 'subunit_fail_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have an error_test function
+echo 'test: subunit_error_test exists'
+found_type=$(type -t subunit_error_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_error_test exists'
+else
+ echo 'failure: subunit_error_test exists ['
+ echo 'subunit_error_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
+# we should have a skip_test function
+echo 'test: subunit_skip_test exists'
+found_type=$(type -t subunit_skip_test)
+status=$?
+if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
+ echo 'success: subunit_skip_test exists'
+else
+ echo 'failure: subunit_skip_test exists ['
+ echo 'subunit_skip_test is not a function:'
+ echo "type -t status: $status"
+ echo "output: $found_type"
+ echo ']' ;
+fi
+
diff --git a/lib/subunit/update.sh b/lib/subunit/update.sh
deleted file mode 100755
index f8265b188c..0000000000
--- a/lib/subunit/update.sh
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/sh
-# Pull in a new snapshot of Subunit from the upstream bzr branch
-
-TARGETDIR="`dirname $0`"
-WORKDIR="`mktemp -d`"
-bzr export "$WORKDIR/subunit" lp:subunit
-bzr export "$WORKDIR/testtools" lp:testtools
-
-for p in python/ filters/tap2subunit;
-do
- rsync -avz --delete "$WORKDIR/subunit/$p" "$TARGETDIR/$p"
-done
-
-rsync -avz --delete "$WORKDIR/testtools/testtools/" "$TARGETDIR/python/testtools/"
-
-rm -rf "$WORKDIR"