Dynamic host test loader (from directory location) - support #33

Merged: 7 commits, Nov 20, 2015
1 change: 1 addition & 0 deletions .gitignore
@@ -45,6 +45,7 @@ pip-log.txt
.coverage
.tox
nosetests.xml
htmlcov/

# Translations
*.mo
82 changes: 52 additions & 30 deletions mbed_greentea/mbed_greentea_cli.py
@@ -18,8 +18,10 @@
import os
import sys
import optparse
import threading
from time import time, sleep
from Queue import Queue
from threading import Thread


from mbed_greentea.mbed_test_api import run_host_test
from mbed_greentea.mbed_test_api import TEST_RESULTS
@@ -41,8 +43,6 @@
from mbed_greentea.mbed_greentea_dlm import greentea_clean_kettle
from mbed_greentea.mbed_yotta_api import build_with_yotta

from Queue import Queue
from threading import Thread

try:
import mbed_lstools
@@ -55,7 +55,19 @@

RET_NO_DEVICES = 1001
RET_YOTTA_BUILD_FAIL = -1

LOCAL_HOST_TESTS_DIR = './test/host_tests' # Used by mbedhtrun -e <dir>

def get_local_host_tests_dir(path):
"""! Forms path to local host tests. Performs additional basic checks if directory exists etc.
"""
# If the specified path exists, return it
if path and os.path.exists(path) and os.path.isdir(path):
return path
# If no path was specified, fall back to the default path (when it exists)
if not path and os.path.exists(LOCAL_HOST_TESTS_DIR) and os.path.isdir(LOCAL_HOST_TESTS_DIR):
return LOCAL_HOST_TESTS_DIR
return None

def print_version(verbose=True):
"""! Print current package version
"""
@@ -99,8 +111,12 @@ def main():
parser.add_option('', '--parallel',
dest='parallel_test_exec',
default=1,
help='Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)')

help='Experimental, you execute test runners for connected to your host MUTs in parallel (speeds up test result collection)')

parser.add_option("-e", "--enum-host-tests",
dest="enum_host_tests",
help="Define directory with yotta module local host tests. Default: ./test/host_tests")

parser.add_option('', '--config',
dest='verbose_test_configuration_only',
default=False,
@@ -195,7 +211,7 @@ def main():
(opts, args) = parser.parse_args()

cli_ret = 0

start = time()
if opts.lock_by_target:
# We are using Greentea proprietary locking mechanism to lock between platforms and targets
@@ -238,14 +254,14 @@ def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_ta
test_platforms_match = 0
test_report = {}
#greentea_acquire_target_id(mut['target_id'], gt_instance_uuid)

while not test_queue.empty():
try:
test = test_queue.get(False)
except Exception as e:
print(str(e))
break

test_result = 'SKIPPED'

disk = mut['mount_point']
Expand All @@ -254,6 +270,7 @@ def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_ta
program_cycle_s = mut_info['properties']['program_cycle_s']
copy_method = opts.copy_method if opts.copy_method else 'shell'
verbose = opts.verbose_test_result_only
enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

test_platforms_match += 1
#gt_log_tab("running host test...")
@@ -265,6 +282,7 @@ def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_ta
program_cycle_s=program_cycle_s,
digest_source=opts.digest_source,
json_test_cfg=opts.json_test_configuration,
enum_host_tests_path=enum_host_tests_path,
verbose=verbose)

single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
@@ -286,20 +304,20 @@ def run_test_thread(test_result_queue, test_queue, opts, mut, mut_info, yotta_ta
test_report[yotta_target_name][test_name]['copy_method'] = copy_method

gt_log("test on hardware with target id: %s \n\ttest '%s' %s %s in %.2f sec"% (mut['target_id'], test['test_bin'], '.' * (80 - len(test['test_bin'])), test_result, single_testduration))

if single_test_result != 'OK' and not verbose and opts.report_fails:
# In some cases we want to print console to see why test failed
# even if we are not in verbose mode
gt_log_tab("test failed, reporting console output (specified with --report-fails option)")
print
print single_test_output
print single_test_output

#greentea_release_target_id(mut['target_id'], gt_instance_uuid)
test_result_queue.put({'test_platforms_match': test_platforms_match,
'test_exec_retcode': test_exec_retcode,
test_result_queue.put({'test_platforms_match': test_platforms_match,
'test_exec_retcode': test_exec_retcode,
'test_report': test_report})
return

def main_cli(opts, args, gt_instance_uuid=None):
"""! This is main CLI function with all command line parameters
@details This function also implements the CLI workflow depending on the CLI parameters provided
@@ -326,10 +344,12 @@ def main_cli(opts, args, gt_instance_uuid=None):

# Capture alternative test console inputs, used e.g. in 'yotta test command'
if opts.digest_source:
enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)
host_test_result = run_host_test(image_path=None,
disk=None,
port=None,
digest_source=opts.digest_source,
enum_host_tests_path=enum_host_tests_path,
verbose=opts.verbose_test_result_only)

single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
@@ -450,7 +470,7 @@ def main_cli(opts, args, gt_instance_uuid=None):
muts_to_test = [] # MUTs to actually be tested
test_queue = Queue() # contains information about test_bin and image_path for each test case
test_result_queue = Queue() # used to store results of each thread
execute_threads = [] # list of threads to run test cases
execute_threads = [] # list of threads to run test cases

### check if argument of --parallel mode is an integer and greater than or equal to 1
try:
@@ -460,8 +480,8 @@
except ValueError:
gt_log_err("argument of mode --parallel is not a int, disable parallel mode")
parallel_test_exec = 1


### Testing procedures, for each target, for each target's compatible platform
for yotta_target_name in yt_target_platform_map:
gt_log("processing '%s' yotta target compatible platforms..."% gt_bright(yotta_target_name))
@@ -484,7 +504,7 @@ def main_cli(opts, args, gt_instance_uuid=None):
gt_log_tab("%s = '%s'"% (k, mbed_dev[k]))
if number_of_parallel_instances < parallel_test_exec:
number_of_parallel_instances += 1
else:
else:
break

# Configuration print mode:
@@ -503,7 +523,7 @@ def main_cli(opts, args, gt_instance_uuid=None):
micro = mut['platform_name']
program_cycle_s = mut_info_map[platfrom_name]['properties']['program_cycle_s']
copy_method = opts.copy_method if opts.copy_method else 'shell'
verbose = opts.verbose_test_result_only
enum_host_tests_path = get_local_host_tests_dir(opts.enum_host_tests)

test_platforms_match += 1
host_test_result = run_host_test(opts.run_app,
Expand All @@ -515,6 +535,7 @@ def main_cli(opts, args, gt_instance_uuid=None):
digest_source=opts.digest_source,
json_test_cfg=opts.json_test_configuration,
run_app=opts.run_app,
enum_host_tests_path=enum_host_tests_path,
verbose=True)

single_test_result, single_test_output, single_testduration, single_timeout = host_test_result
@@ -571,36 +592,37 @@ def main_cli(opts, args, gt_instance_uuid=None):
gt_log_tab("note: test case names are case sensitive")
gt_log_tab("note: see list of available test cases below")
list_binaries_for_targets(verbose_footer=False)

gt_log("running %d test%s for target '%s' and platform '%s'"% (
len(filtered_ctest_test_list),
"s" if len(filtered_ctest_test_list) != 1 else "",
gt_bright(yotta_target_name),
gt_bright(platform_name)
))

for test_bin, image_path in filtered_ctest_test_list.iteritems():
test = {"test_bin":test_bin, "image_path":image_path}
test_queue.put(test)

number_of_threads = 0
for mut in muts_to_test:
#################################################################
# Experimental, parallel test execution
#################################################################
if number_of_threads < parallel_test_exec:
t = threading.Thread(target=run_test_thread, args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name))
args = (test_result_queue, test_queue, opts, mut, mut_info, yotta_target_name)
t = Thread(target=run_test_thread, args=args)
execute_threads.append(t)
number_of_threads += 1
number_of_threads += 1

gt_log_tab("use %s instance%s for testing" % (len(execute_threads), 's' if len(execute_threads) != 1 else ''))
for t in execute_threads:
t.daemon = True
t.start()
while test_result_queue.qsize() != len(execute_threads):
sleep(1)
# merge partial test reports from different threads into the final test report
while test_result_queue.qsize() != len(execute_threads):
sleep(1)

# merge partial test reports from different threads into the final test report
for t in execute_threads:
t.join()
test_return_data = test_result_queue.get(False)
Expand All @@ -614,7 +636,7 @@ def main_cli(opts, args, gt_instance_uuid=None):
test_report.update(partial_test_report)
else:
test_report[report_key].update(partial_test_report[report_key])

if opts.verbose_test_configuration_only:
print
print "Example: execute 'mbedgt --target=TARGET_NAME' to start testing for TARGET_NAME target"
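The refactored --parallel flow in main_cli() above is a plain queue-plus-worker pattern: tests go into a shared Queue, each connected MUT gets a run_test_thread worker that drains that queue, and every worker pushes a partial report to a result queue which the main thread merges after joining the workers. Below is a minimal, self-contained sketch of that pattern (simplified names and a dummy result, not the actual mbed-greentea code):

# Sketch only: queue-plus-worker pattern behind mbedgt --parallel (simplified).
from Queue import Queue          # Python 2 stdlib module, as used by mbed-greentea here
from threading import Thread

def worker(test_queue, result_queue):
    """Drain the shared test queue and push one partial report per test."""
    while not test_queue.empty():
        try:
            test = test_queue.get(False)
        except Exception:
            break
        # ... flash the binary and run/grade the host test here ...
        result_queue.put({'test': test, 'result': 'OK'})   # dummy result

test_queue, result_queue = Queue(), Queue()
for name in ['test-basic', 'test-timer']:    # illustrative test names
    test_queue.put(name)

workers = [Thread(target=worker, args=(test_queue, result_queue)) for _ in range(2)]
for t in workers:
    t.daemon = True
    t.start()
for t in workers:
    t.join()

while not result_queue.empty():
    print result_queue.get(False)

The real CLI additionally polls the result queue size before joining the workers; the sketch keeps only the core pattern.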
4 changes: 4 additions & 0 deletions mbed_greentea/mbed_test_api.py
@@ -97,6 +97,7 @@ def run_host_test(image_path,
digest_source=None,
json_test_cfg=None,
max_failed_properties=5,
enum_host_tests_path=None,
run_app=None):
"""! This function runs host test supervisor (executes mbedhtrun) and checks output from host test process.
@return Tuple with test results, test output and test duration times
Expand All @@ -112,6 +113,7 @@ def run_host_test(image_path,
@param program_cycle_s Wait after flashing delay (sec)
@param json_test_cfg Additional test configuration file path passed to host tests in JSON format
@param max_failed_properties After how many unknown properties we will assume test is not ported
@param enum_host_tests_path Directory where locally defined host tests may reside
@param run_app Run application mode flag (we run application and grab serial port data)
@param digest_source if None mbedhtrun will be executed. If 'stdin',
stdin will be used via StdInObserver or file (if
@@ -250,6 +252,8 @@ def get_auto_property_value(property_name, line):
cmd += ["--test-cfg", '"%s"' % str(json_test_cfg)]
if run_app is not None:
cmd += ["--run"] # -f stores binary name!
if enum_host_tests_path:
cmd += ["-e", '"%s"'% enum_host_tests_path]

if verbose:
gt_log_tab("calling mbedhtrun: %s"% " ".join(cmd))
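Downstream, run_host_test() forwards the resolved directory to mbedhtrun as -e <dir> (see the cmd construction above). A minimal sketch of the resulting invocation; the -d/-p/-f values are example placeholders, and only the -e part comes from this change:

# Sketch only: how the resolved local host tests directory reaches mbedhtrun.
# Mount point, serial port and image path below are example placeholders.
enum_host_tests_path = './test/host_tests'

cmd = ["mbedhtrun",
       "-d", "E:",                      # example mount point
       "-p", "COM3",                    # example serial port
       "-f", '"build/mbed-test.bin"']   # example image path

if enum_host_tests_path:
    cmd += ["-e", '"%s"' % enum_host_tests_path]   # same shape as in mbed_test_api.py

print " ".join(cmd)
# -> mbedhtrun -d E: -p COM3 -f "build/mbed-test.bin" -e "./test/host_tests"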