image_test: switch to @unittest.expectedFailure
The only point of the Forgiving/NonForgiving split is that forgiving
tests don't halt execution prematurely relative to non-forgiving tests,
and that the final exit status reflects only the non-forgiving tests.
The unittest module already provides a decorator that does this for us:
@unittest.expectedFailure. Any test marked with it will not halt the
overall execution and won't affect the exit status. Replace our custom
infrastructure with this one decorator to simplify things greatly
without losing any functionality.
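For illustration, a minimal sketch of the decorator's behavior (the
test names are invented for the example):

  import unittest

  class ExampleTest(unittest.TestCase):
    # A failure here is reported as an "expected failure"; it neither
    # stops the run nor flips the exit status to non-zero.
    @unittest.expectedFailure
    def testForgiving(self):
      self.assertEqual(1, 2)

    # An unmarked test still passes or fails the run as usual.
    def testStrict(self):
      self.assertEqual(1, 1)

  if __name__ == '__main__':
    unittest.main()

Running this exits 0; the forgiving failure only shows up in the
summary as "OK (expected failures=1)".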
BUG=chromium:775932
TEST=test_image & unittests still pass
Change-Id: I4a1a789e928d43e86605f3f40169d2e14e166b73
Reviewed-on: https://chromium-review.googlesource.com/726894
Commit-Ready: Mike Frysinger <vapier@chromium.org>
Tested-by: Mike Frysinger <vapier@chromium.org>
Reviewed-by: Jorge Lucangeli Obes <jorgelo@chromium.org>
diff --git a/scripts/test_image.py b/scripts/test_image.py
index f775c7d..a86abe8 100644
--- a/scripts/test_image.py
+++ b/scripts/test_image.py
@@ -12,7 +12,6 @@
 
 from chromite.lib import constants
 from chromite.lib import commandline
-from chromite.lib import cros_logging as logging
 from chromite.lib import image_test_lib
 from chromite.lib import osutils
 from chromite.lib import path_util
@@ -68,14 +67,6 @@
   # image tests automatically because they depend on a proper environment.
   loader.testMethodPrefix = 'Test'
   all_tests = loader.loadTestsFromName('chromite.cros.test.image_test')
-  forgiving = image_test_lib.ImageTestSuite()
-  non_forgiving = image_test_lib.ImageTestSuite()
-  for suite in all_tests:
-    for test in suite.GetTests():
-      if test.IsForgiving():
-        forgiving.addTest(test)
-      else:
-        non_forgiving.addTest(test)
 
   # Run them in the image directory.
   runner = image_test_lib.ImageTestRunner()
@@ -86,12 +77,7 @@
   with osutils.TempDir(base_dir=tmp_in_chroot) as temp_dir:
     with osutils.MountImageContext(image_file, temp_dir):
       with osutils.ChdirContext(temp_dir):
-        # Run non-forgiving tests first so that exceptions in forgiving tests
-        # do not skip any required tests.
-        logging.info('Running NON-forgiving tests.')
-        result = runner.run(non_forgiving)
-        logging.info('Running forgiving tests.')
-        runner.run(forgiving)
+        result = runner.run(all_tests)
 
   if result and not result.wasSuccessful():
     return 1
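For reference, a test that used to report itself as forgiving via
IsForgiving() would now carry the decorator directly. A minimal sketch
against chromite's image_test_lib, with a hypothetical test name (note
the 'Test' method prefix matches loader.testMethodPrefix above):

  import unittest

  from chromite.lib import image_test_lib

  class ExampleImageTest(image_test_lib.ImageTestCase):
    """Hypothetical forgiving image test."""

    # The stock decorator replaces the custom IsForgiving() plumbing:
    # a failure here is recorded but never fails the overall run.
    @unittest.expectedFailure
    def TestKnownFlakyCheck(self):
      self.fail('known issue; must not fail the image test run')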