diff --git a/unit-tests/py/rspy/acroname.py b/unit-tests/py/rspy/acroname.py index 29ff2920b0..1f6bad334a 100644 --- a/unit-tests/py/rspy/acroname.py +++ b/unit-tests/py/rspy/acroname.py @@ -5,6 +5,8 @@ https://acroname.com/reference/python/index.html """ +from rspy import log + if __name__ == '__main__': import os, sys, getopt @@ -29,7 +31,7 @@ def usage(): try: import brainstem except ModuleNotFoundError: - print( '-E-', 'No acroname library is available!' ) + log.w( 'No acroname library is available!' ) raise hub = None diff --git a/unit-tests/py/rspy/log.py b/unit-tests/py/rspy/log.py index 243699d259..83c76406d4 100644 --- a/unit-tests/py/rspy/log.py +++ b/unit-tests/py/rspy/log.py @@ -18,8 +18,14 @@ def _stream_has_color( stream ): # guess false in case of error return False -if _stream_has_color( sys.stdout ): +_have_color = '--color' in sys.argv +if _have_color: + sys.argv.remove( '--color' ) +else: + _have_color = _stream_has_color( sys.stdout ) +if _have_color: red = '\033[91m' + yellow = '\033[93m' gray = '\033[90m' reset = '\033[0m' cr = '\033[G' @@ -36,12 +42,16 @@ def progress(*args): global _progress _progress = args else: - red = gray = reset = cr = clear_eos = '' + red = yellow = gray = reset = cr = clear_eos = '' def out(*args): print( *args ) def progress(*args): print( *args ) +def is_color_on(): + global _have_color + return _have_color + def quiet_on(): print( "QUIET ON" ) @@ -98,3 +108,20 @@ def reset_errors(): global _n_errors _n_errors = 0 + +# We track the number of warnings +_n_warnings = 0 +def w(*args): + global yellow, reset + out( yellow + '-W-' + reset, *args ) + global _n_warnings + _n_warnings = _n_warnings + 1 + +def n_warnings(): + global _n_warnings + return _n_warnings + +def reset_warnings(): + global _n_warnings + _n_warnings = 0 + diff --git a/unit-tests/py/rspy/test.py b/unit-tests/py/rspy/test.py index b4bf2a30d3..5a6681aa4b 100644 --- a/unit-tests/py/rspy/test.py +++ b/unit-tests/py/rspy/test.py @@ -83,11 +83,16 @@ def 
print_stack(): """ Function for printing the current call stack. Used when an assertion fails """ - test_py_path = os.sep + "unit-tests" + os.sep + "py" + os.sep + "test.py" - for line in traceback.format_stack(): - if test_py_path in line: # avoid printing the lines of calling to this function - continue - print(line) + print( 'Traceback (most recent call first):' ) + stack = traceback.format_stack() + # Avoid stack trace into format_stack(): + # File "C:/work/git/lrs\unit-tests\py\rspy\test.py", line 124, in check + # print_stack() + # File "C:/work/git/lrs\unit-tests\py\rspy\test.py", line 87, in print_stack + # stack = traceback.format_stack() + stack = stack[:-2] + for line in reversed( stack ): + print( line, end = '' ) # format_stack() adds \n """ The following functions are for asserting test cases: @@ -119,9 +124,9 @@ def check(exp, abort_if_failed = False): global n_assertions n_assertions += 1 if not exp: + print_stack() print("Check failed, received", exp) check_failed() - print_stack() if abort_if_failed: abort() return False @@ -144,9 +149,10 @@ def check_equal(result, expected, abort_if_failed = False): global n_assertions n_assertions += 1 if result != expected: - print("Result was:" + result + "\nBut we expected: " + expected) - check_failed() print_stack() + print( "Result was:", result ) + print( " expected:", expected ) + check_failed() if abort_if_failed: abort() return False @@ -194,10 +200,10 @@ def check_equal_lists(result, expected, abort_if_failed = False): print("The element of index", i, "in both lists was not equal") i += 1 if failed: + print_stack() print("Result list:", result) print("Expected list:", expected) check_failed() - print_stack() if abort_if_failed: abort() return False @@ -215,14 +221,13 @@ def check_exception(exception, expected_type, expected_msg = None, abort_if_fail """ failed = False if type(exception) != expected_type: - print("Raised exception was of type", type(exception), "and not of type", expected_type, "as 
expected") - failed = True + failed = [ "Raised exception was of type", type(exception), "and not of type", expected_type, "as expected" ] if expected_msg and str(exception) != expected_msg: - print("Exception had message:", str(exception), "\nBut we expected:", expected_msg) - failed = True + failed = ( failed or [] ) + [ "Exception had message:", str(exception), "\nBut we expected:", expected_msg ] if failed: - check_failed() print_stack() + print( *failed ) + check_failed() if abort_if_failed: abort() return False diff --git a/unit-tests/run-unit-tests.py b/unit-tests/run-unit-tests.py index a18d36fd01..2f8d645d62 100644 --- a/unit-tests/run-unit-tests.py +++ b/unit-tests/run-unit-tests.py @@ -334,6 +334,8 @@ def command(self): cmd += [self.path_to_script] if log.is_debug_on(): cmd += ['--debug'] + if log.is_color_on(): + cmd += ['--color'] return cmd def run_test( self ): @@ -461,6 +463,7 @@ def test_wrapper( test, configuration = None ): # Under Travis, we'll have no devices and no acroname skip_live_tests = len(devices.all()) == 0 and not devices.acroname # +log.reset_errors() for test in get_tests(): # if not test.is_live(): @@ -475,14 +478,14 @@ def test_wrapper( test, configuration = None ): try: devices.enable_only( serial_numbers, recycle = True ) except RuntimeError as e: - log.e( log.red + self.name + log.reset + ': ' + str(e) ) + log.w( log.red + test.name + log.reset + ': ' + str(e) ) else: test_wrapper( test, configuration ) log.progress() n_errors = log.n_errors() if n_errors: - log.out( log.red + str(n_errors) + log.reset + ' of ' + str(n_tests) + ' test(s) failed!' + log.clear_eos ) + log.out( log.red + str(n_errors) + log.reset, 'of', n_tests, 'test(s)', log.red + 'failed!' + log.reset + log.clear_eos ) sys.exit(1) # log.out( str(n_tests) + ' unit-test(s) completed successfully' + log.clear_eos )