-
-
Notifications
You must be signed in to change notification settings - Fork 162
Expand file tree
/
Copy pathgenerate_reference_results.py
More file actions
169 lines (139 loc) · 7.25 KB
/
generate_reference_results.py
File metadata and controls
169 lines (139 loc) · 7.25 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
import argparse
import hashlib
import json
import logging
import shutil
import subprocess
import tarfile
import time
from datetime import datetime
from pathlib import Path
from typing import List

from jinja2 import Environment, FileSystemLoader

from metadata_parser.metdata import Tutorials, ReferenceResult
from paths import PRECICE_TUTORIAL_DIR, PRECICE_TESTS_RUN_DIR, PRECICE_TESTS_DIR, PRECICE_REL_OUTPUT_DIR
from systemtests.SystemtestArguments import SystemtestArguments
from systemtests.Systemtest import Systemtest
from systemtests.TestSuite import TestSuites
def create_tar_gz(source_folder: Path, output_filename: Path) -> None:
    """Pack *source_folder* into a gzipped tarball at *output_filename*.

    The archive's single top-level entry is named after the output file with
    its trailing ``.tar.gz`` stripped, so extracting yields one folder.

    Args:
        source_folder: directory to archive (added recursively).
        output_filename: destination path, conventionally ending in ``.tar.gz``.
    """
    with tarfile.open(output_filename, "w:gz") as tar:
        # removesuffix only strips a *trailing* ".tar.gz"; str.replace would
        # also mangle a name containing ".tar.gz" in the middle.
        tar.add(source_folder, arcname=output_filename.name.removesuffix(".tar.gz"))
def get_machine_informations():
    """Collect human-readable host information for the reference-result metadata.

    Returns:
        tuple[str, str]: ``(uname_info, lscpu_info)`` — the output of
        ``uname -a`` and of ``lscpu`` with CPU-vulnerability lines removed.
        Each entry falls back to an explanatory message when the
        corresponding tool is unavailable on this machine.
    """

    def command_is_avail(command: str) -> bool:
        # shutil.which avoids spawning a child process and works even on
        # systems where the external `which` binary itself is missing.
        return shutil.which(command) is not None

    uname_info = "uname not available on the machine the systemtests were executed."
    lscpu_info = "lscpu not available on the machine the systemtests were executed."

    if command_is_avail("uname"):
        result = subprocess.run(["uname", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if result.returncode == 0:
            uname_info = result.stdout

    if command_is_avail("lscpu"):
        result_lscpu = subprocess.run(["lscpu"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
        if result_lscpu.returncode == 0:
            # Filter in Python instead of piping through external `grep -v`:
            # vulnerability lines are irrelevant for reproducibility and
            # differ between otherwise identical hosts.
            lscpu_info = "\n".join(
                line for line in result_lscpu.stdout.splitlines()
                if "Vulner" not in line)

    return (uname_info, lscpu_info)
def render_reference_results_info(
        reference_results: List[ReferenceResult],
        arguments_used: SystemtestArguments,
        time: str):
    """Render the ``reference_results.metadata`` document as a string.

    Args:
        reference_results: reference-result archives to checksum and list.
        arguments_used: systemtest arguments the results were generated with.
        time: timestamp string recorded next to every file entry.

    Returns:
        The metadata file contents rendered from the Jinja template.
    """

    def sha256sum(filename):
        # Stream the file through a reusable 128 KiB buffer so large archives
        # never need to fit into memory. Implementation based on
        # https://stackoverflow.com/a/44873382/2254346 (Python 3.10 compatible).
        digest = hashlib.sha256()
        buffer = memoryview(bytearray(128 * 1024))
        with open(filename, 'rb', buffering=0) as handle:
            while bytes_read := handle.readinto(buffer):
                digest.update(buffer[:bytes_read])
        return digest.hexdigest()

    files = [
        {
            'sha256': sha256sum(reference_result.path),
            'time': time,
            'name': reference_result.path.name,
        }
        for reference_result in reference_results
    ]

    uname, lscpu = get_machine_informations()
    jinja_env = Environment(loader=FileSystemLoader(PRECICE_TESTS_DIR))
    template = jinja_env.get_template("reference_results.metadata.template")
    return template.render({
        'arguments': arguments_used.arguments,
        'files': files,
        'uname': uname,
        'lscpu': lscpu,
    })
def main() -> None:
    """Run every configured systemtest and archive its output as reference results.

    Workflow:
      1. Parse CLI arguments (``--rundir``, ``--log-level``).
      2. Build the set of Systemtests from ``tests.yaml`` using the pinned
         versions in ``reference_versions.yaml``.
      3. Run each test, aborting with ``RuntimeError`` on the first failure.
      4. Pack each test's output folder into a ``.tar.gz`` reference archive
         and write a JSON sidecar with iterations.log hashes.
      5. Write a ``reference_results.metadata`` file per tutorial.

    Raises:
        RuntimeError: when a systemtest fails or its result folder is missing.
    """
    parser = argparse.ArgumentParser(description='Generate reference data for systemtests')
    # nargs='?' with const == default lets "--rundir" appear with or without a value.
    parser.add_argument('--rundir', type=str, help='Directory to run the systemstests in.',
                        nargs='?', const=PRECICE_TESTS_RUN_DIR, default=PRECICE_TESTS_RUN_DIR)
    parser.add_argument('--log-level', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        default='INFO', help='Set the logging level')
    args = parser.parse_args()

    logging.basicConfig(level=args.log_level, format='%(levelname)s: %(message)s')
    print(f"Using log-level: {args.log_level}")
    run_directory = Path(args.rundir)

    # Discover tutorials and the test matrix defined for them.
    available_tutorials = Tutorials.from_path(PRECICE_TUTORIAL_DIR)
    test_suites = TestSuites.from_yaml(PRECICE_TESTS_DIR / "tests.yaml", available_tutorials)
    # Read in parameters
    build_args = SystemtestArguments.from_yaml(PRECICE_TESTS_DIR / "reference_versions.yaml")
    systemtests_to_run = set()
    for test_suite in test_suites:
        tutorials = test_suite.cases_of_tutorial.keys()
        for tutorial in tutorials:
            cases = test_suite.cases_of_tutorial[tutorial]
            reference_results = test_suite.reference_results[tutorial]
            # Tutorials without configured time limits fall back to one "no limit" per case.
            max_times = test_suite.max_times.get(tutorial, [None] * len(cases))
            for case, reference_result, max_time in zip(
                    cases, reference_results, max_times):
                # A set de-duplicates tests that appear in multiple suites.
                systemtests_to_run.add(
                    Systemtest(tutorial, build_args, case, reference_result, max_time=max_time))

    reference_result_per_tutorial = {}
    current_time_string = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    logging.info(f"About to run the following tests {systemtests_to_run}")
    for number, systemtest in enumerate(systemtests_to_run, start=1):
        logging.info(f"Started running {systemtest}, {number}/{len(systemtests_to_run)}")
        t = time.perf_counter()
        result = systemtest.run_for_reference_results(run_directory)
        elapsed_time = time.perf_counter() - t
        logging.info(f"Running {systemtest} took {elapsed_time:^.1f} seconds")
        # Fail fast: one broken test invalidates the whole reference set.
        if not result.success:
            raise RuntimeError(f"Failed to execute {systemtest}")
        # Seed the per-tutorial bucket; it is filled in the archiving loop below.
        reference_result_per_tutorial[systemtest.tutorial] = []

    # Put the tar.gz in there
    for systemtest in systemtests_to_run:
        reference_result_folder = systemtest.get_system_test_dir() / PRECICE_REL_OUTPUT_DIR
        reference_result_per_tutorial[systemtest.tutorial].append(systemtest.reference_result)
        # create folder if needed
        systemtest.reference_result.path.parent.mkdir(parents=True, exist_ok=True)
        if reference_result_folder.exists():
            create_tar_gz(reference_result_folder, systemtest.reference_result.path)
        else:
            raise RuntimeError(
                f"Error executing: \n {systemtest} \n Could not find result folder {reference_result_folder}\n Probably the tutorial did not run through properly. Please check corresponding logs")
        # Write iterations.log hashes sidecar for implicit-coupling regression checks (issue #440)
        # NOTE(review): reaches into Systemtest private helpers
        # (_collect_iterations_logs, _sha256_file) — consider a public API.
        collected = systemtest._collect_iterations_logs(systemtest.get_system_test_dir())
        if collected:
            hashes = {
                rel: Systemtest._sha256_file(p) for rel, p in collected
            }
            # Sidecar lives next to the .tar.gz, e.g. "<name>.iterations-hashes.json".
            sidecar = systemtest.reference_result.path.with_suffix(".iterations-hashes.json")
            sidecar.write_text(json.dumps(hashes, sort_keys=True, indent=2))
            logging.info(f"Wrote iterations hashes for {systemtest.reference_result.path.name}")

    # write readme
    for tutorial in reference_result_per_tutorial.keys():
        reference_results_dir = tutorial.path / "reference-results"
        reference_results_dir.mkdir(parents=True, exist_ok=True)
        with open(reference_results_dir / "reference_results.metadata", 'w') as file:
            ref_results_info = render_reference_results_info(
                reference_result_per_tutorial[tutorial], build_args, current_time_string)
            logging.info(f"Writing results for {tutorial.name}")
            file.write(ref_results_info)

    logging.info(f"Done. Please make sure to manually have a look into the reference results before making a PR.")


if __name__ == '__main__':
    main()