Skip to content
44 changes: 44 additions & 0 deletions tools/tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -107,6 +107,50 @@ The easiest way to debug a systemtest run is first to have a look at the output
If this does not provide enough hints, the next step is to download the generated `system_tests_run_<run_id>_<run_attempt>` artifact. Note that by default this will only be generated if the systemtests fail.
Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for a further investigation.

### Re-running system tests from CI artifacts

System test artifacts are produced by the GitHub Actions workflows as archives named
`system_tests_run_<run_id>_<run_attempt>`. Each archive contains a `runs/` folder with one
subdirectory per executed system test. Every such subdirectory is self-contained and can be
re-run locally using Docker.

To re-run a failing test locally:

1. Download `system_tests_run_<run_id>_<run_attempt>.zip` from the failed GitHub Actions run.
2. Extract the archive and change into the `runs/` directory:

```bash
unzip system_tests_run_<run_id>_<run_attempt>.zip
cd system_tests_run_<run_id>_<run_attempt>/runs
ls
# pick the test you are interested in
cd <some_systemtest_directory>
```

3. In that directory you will find:

- a copy of the tutorial,
- a `tools/` folder with the required helper scripts and Dockerfiles,
- generated Docker Compose files (`docker-compose.tutorial.yaml`,
`docker-compose.field_compare.yaml`), and
- a helper script `rerun_systemtest.sh`.

4. Re-run the test using Docker:

```bash
# if the script is executable
./rerun_systemtest.sh

# otherwise
sh rerun_systemtest.sh
```

The script rebuilds the Docker images, re-runs the tutorial containers, and (if present)
executes the field comparison step. All paths used inside the generated Docker Compose
files are relative to the system test directory, so you can move the extracted `runs/`
folder anywhere on a Linux machine with Docker installed and still re-run the tests
in the same way as on the CI runner.

## Adding new tests

### Adding tutorials
Expand Down
16 changes: 8 additions & 8 deletions tools/tests/components.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ bare: # A default component used when the solver does not have any dependencies
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"

python-bindings:
repository: https://github.com/precice/python-bindings
Expand All @@ -30,7 +30,7 @@ python-bindings:
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"
PYTHON_BINDINGS_REF:
semantic: Git ref of the Python bindings to use
default: "master"
Expand All @@ -50,7 +50,7 @@ openfoam-adapter:
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"
OPENFOAM_EXECUTABLE:
description: executable of openfoam to use
default: "openfoam2512"
Expand All @@ -73,7 +73,7 @@ fenics-adapter:
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"
PYTHON_BINDINGS_REF:
semantic: Git ref of the Python bindings to use
default: "master"
Expand All @@ -96,7 +96,7 @@ nutils-adapter:
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"
PYTHON_BINDINGS_REF:
semantic: Git ref of the Python bindings to use
default: "master"
Expand All @@ -116,7 +116,7 @@ calculix-adapter:
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"
CALCULIX_VERSION:
description: Version of Calculix to use
default: "2.20"
Expand All @@ -139,7 +139,7 @@ su2-adapter:
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"
SU2_VERSION:
description: Version of SU2 to use
default: "7.5.1"
Expand All @@ -162,7 +162,7 @@ dealii-adapter:
default: "production-audit"
TUTORIALS_REF:
description: Tutorial git reference to use
default: "master"
default: "develop"
DEALII_ADAPTER_REF:
description: Version of deal.ii-adapter to use
default: "master"
73 changes: 67 additions & 6 deletions tools/tests/systemtests/Systemtest.py
Original file line number Diff line number Diff line change
Expand Up @@ -187,20 +187,37 @@ def __get_docker_services(self) -> Dict[str, str]:
except Exception as exc:
raise KeyError("Please specify a PLATFORM argument") from exc

# Use an absolute path here only for validation that the requested
# dockerfile context exists on the machine running the system tests.
self.dockerfile_context = PRECICE_TESTS_DIR / "dockerfiles" / Path(plaform_requested)
if not self.dockerfile_context.exists():
raise ValueError(
f"The path {self.dockerfile_context.resolve()} resulting from argument PLATFORM={plaform_requested} could not be found in the system")

def render_service_template_per_case(case: Case, params_to_use: Dict[str, str]) -> str:
# Inside the individual system test directory (`self.system_test_dir`)
# we copy a full `tools/` tree into the parent run directory
# (see __copy_tools). From the point of view of the system test
# directory we therefore need to go one level up to reach the
# shared `tools/` folder:
# <run_directory>/tools/tests/dockerfiles/<PLATFORM>
# ^-------------^ parent of self.system_test_dir
dockerfile_context_relative = (
Path("..") / "tools" / "tests" / "dockerfiles" / Path(plaform_requested)
)

render_dict = {
'run_directory': self.run_directory.resolve(),
# Use a relative path to the *parent* run directory so that
# containers still see /runs/<tutorial_folder> like before,
# while keeping the compose file independent of the CI
# runner's absolute paths.
'run_directory': "..",
'tutorial_folder': self.tutorial_folder,
'build_arguments': params_to_use,
'params': params_to_use,
'case_folder': case.path,
'run': case.run_cmd,
'dockerfile_context': self.dockerfile_context,
'dockerfile_context': dockerfile_context_relative,
}
jinja_env = Environment(loader=FileSystemLoader(PRECICE_TESTS_DIR))
template = jinja_env.get_template(case.component.template)
Expand All @@ -215,12 +232,20 @@ def render_service_template_per_case(case: Case, params_to_use: Dict[str, str])
def __get_docker_compose_file(self):
rendered_services = self.__get_docker_services()
render_dict = {
'run_directory': self.run_directory.resolve(),
# See __get_docker_services: keep the docker-compose file
# portable by referring to the parent run directory only.
'run_directory': "..",
'tutorial_folder': self.tutorial_folder,
'tutorial': self.tutorial.path.name,
'services': rendered_services,
'build_arguments': self.params_to_use,
'dockerfile_context': self.dockerfile_context,
# The dockerfile_context value inside the templates is only
# used as a build context path and does not need to be
# absolute – it will be resolved relative to the system test
# directory.
'dockerfile_context': (
Path("..") / "tools" / "tests" / "dockerfiles" / Path(self.params_to_use.get("PLATFORM"))
),
'precice_output_folder': PRECICE_REL_OUTPUT_DIR,
}
jinja_env = Environment(loader=FileSystemLoader(PRECICE_TESTS_DIR))
Expand All @@ -229,7 +254,10 @@ def __get_docker_compose_file(self):

def __get_field_compare_compose_file(self):
render_dict = {
'run_directory': self.run_directory.resolve(),
# Fieldcompare should also use only relative paths from inside
# the system test directory so that the run directory can be
# moved and re-executed elsewhere.
'run_directory': "..",
'tutorial_folder': self.tutorial_folder,
'precice_output_folder': PRECICE_REL_OUTPUT_DIR,
'reference_output_folder': PRECICE_REL_REFERENCE_DIR + "/" + self.reference_result.path.name.replace(".tar.gz", ""),
Expand Down Expand Up @@ -420,9 +448,42 @@ def _build_docker(self):
logging.debug(f"Building docker image for {self}")
time_start = time.perf_counter()
docker_compose_content = self.__get_docker_compose_file()
with open(self.system_test_dir / "docker-compose.tutorial.yaml", 'w') as file:
docker_compose_path = self.system_test_dir / "docker-compose.tutorial.yaml"
with open(docker_compose_path, 'w') as file:
file.write(docker_compose_content)

# Provide a small helper script inside the system test directory so
# that a user downloading the corresponding `runs/` artifact can
# re-run the exact docker-compose setup locally without having to
# reconstruct the commands by hand.
rerun_script_path = self.system_test_dir / "rerun_systemtest.sh"
if not rerun_script_path.exists():
rerun_script_path.write_text(
"#!/usr/bin/env sh\n"
"set -e -u\n"
"\n"
"cd \"$(dirname \"$0\")\"\n"
"\n"
"echo \"[systemtests] Building tutorial images...\"\n"
"docker compose --file docker-compose.tutorial.yaml build\n"
"\n"
"echo \"[systemtests] Running tutorial containers...\"\n"
"docker compose --file docker-compose.tutorial.yaml up\n"
"\n"
"if [ -f docker-compose.field_compare.yaml ]; then\n"
" echo \"[systemtests] Running fieldcompare...\"\n"
" docker compose --file docker-compose.field_compare.yaml up --exit-code-from field-compare\n"
"fi\n"
)
# Make the script executable for convenience; even if this bit
# does not survive archiving, users can still run it via
# `sh rerun_systemtest.sh`.
try:
rerun_script_path.chmod(rerun_script_path.stat().st_mode | 0o111)
except Exception:
logging.debug(
f"Could not mark {rerun_script_path} as executable; continuing anyway.")

stdout_data = []
stderr_data = []

Expand Down