-
-
Notifications
You must be signed in to change notification settings - Fork 162
Expand file tree
/
Copy pathsystemtests.py
More file actions
100 lines (83 loc) · 4.12 KB
/
systemtests.py
File metadata and controls
100 lines (83 loc) · 4.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
import argparse
import logging
import sys
import time
from pathlib import Path

from metadata_parser.metdata import Tutorials, Case
from paths import PRECICE_TUTORIAL_DIR, PRECICE_TESTS_RUN_DIR, PRECICE_TESTS_DIR
from systemtests.Systemtest import Systemtest, display_systemtestresults_as_table
from systemtests.SystemtestArguments import SystemtestArguments
from systemtests.TestSuite import TestSuites
def _build_argument_parser() -> argparse.ArgumentParser:
    """Define the command-line interface of the systemtests runner."""
    parser = argparse.ArgumentParser(description='systemtest')
    parser.add_argument('--suites', type=str,
                        help='Comma-separated test-suites to execute')
    parser.add_argument(
        '--build_args',
        type=str,
        help='Comma-separated list of arguments provided to the components like openfoam:2102,pythonbindings:latest')
    parser.add_argument('--rundir', type=str, help='Directory to run the systemstests in.',
                        nargs='?', const=PRECICE_TESTS_RUN_DIR, default=PRECICE_TESTS_RUN_DIR)
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')
    return parser


def _resolve_test_suites(requested_names, available_tutorials):
    """Map requested suite names onto TestSuite objects.

    Unknown names are logged as errors and skipped; raises RuntimeError when
    no requested name matched at all.
    """
    available_testsuites = TestSuites.from_yaml(
        PRECICE_TESTS_DIR / "tests.yaml", available_tutorials)
    test_suites_to_execute = []
    for test_suite_requested in requested_names:
        test_suite_found = available_testsuites.get_by_name(
            test_suite_requested)
        if not test_suite_found:
            logging.error(f"Did not find the testsuite with name {test_suite_requested}")
        else:
            test_suites_to_execute.append(test_suite_found)
    if not test_suites_to_execute:
        raise RuntimeError(
            f"No matching test suites with names {requested_names} found. Use print_test_suites.py to get an overview")
    return test_suites_to_execute


def _systemtests_from_suites(test_suites, build_args):
    """Expand every (tutorial, case) pair of the given suites into a Systemtest."""
    systemtests = []
    for test_suite in test_suites:
        for tutorial in test_suite.cases_of_tutorial.keys():
            cases = test_suite.cases_of_tutorial[tutorial]
            reference_results = test_suite.reference_results[tutorial]
            # max_times is optional per tutorial; pad with None so zip stays aligned
            max_times = test_suite.max_times.get(
                tutorial, [None] * len(cases))
            for case, reference_result, max_time in zip(
                    cases, reference_results, max_times):
                systemtests.append(
                    Systemtest(tutorial, build_args, case, reference_result, max_time=max_time))
    return systemtests


def main():
    """Run the systemtests selected via --suites and exit 0 on success, 1 otherwise."""
    args = _build_argument_parser().parse_args()

    # Configure logging based on the provided log level
    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")

    available_tutorials = Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    build_args = SystemtestArguments.from_args(args.build_args)
    run_directory = Path(args.rundir)

    systemtests_to_run = []
    if args.suites:
        test_suites_to_execute = _resolve_test_suites(
            args.suites.split(','), available_tutorials)
        systemtests_to_run = _systemtests_from_suites(
            test_suites_to_execute, build_args)
    if not systemtests_to_run:
        raise RuntimeError("Did not find any Systemtests to execute.")
    logging.info(f"About to run the following systemtest in the directory {run_directory}:\n {systemtests_to_run}")

    results = []
    for number, systemtest in enumerate(systemtests_to_run, start=1):
        logging.info(f"Started running {systemtest}, {number}/{len(systemtests_to_run)}")
        t = time.perf_counter()
        result = systemtest.run(run_directory)
        elapsed_time = time.perf_counter() - t
        # NOTE: original used '{:^.1f}' — the '^' align flag is a no-op without a width
        logging.info(f"Running {systemtest} took {elapsed_time:.1f} seconds")
        results.append(result)

    system_test_success = True
    for result in results:
        if not result.success:
            logging.error(f"Failed to run {result.systemtest}")
            system_test_success = False
        else:
            logging.info(f"Success running {result.systemtest}")
    display_systemtestresults_as_table(results)

    # sys.exit instead of the site-module builtin exit(): the latter is
    # intended for interactive use and is absent under `python -S`.
    sys.exit(0 if system_test_success else 1)
# Script entry point: run the systemtests only when executed directly, not on import.
if __name__ == '__main__':
    main()