
Merge pull request #1778 from buildtesters/post_run
Add support for post_run
shahzebsiddiqui committed May 17, 2024
2 parents 39772b4 + d42ad66, commit 399b3a2
Showing 11 changed files with 124 additions and 10 deletions.
57 changes: 57 additions & 0 deletions buildtest/builders/base.py
@@ -328,6 +328,7 @@ def build(self, modules=None, modulepurge=None, unload_modules=None):
        self._build_setup()
        self._write_test()
        self._write_build_script(modules, modulepurge, unload_modules)
        self._write_post_run_script()

    def run(self, cmd, timeout=None):
        """This is the entry point for running the test. This method will prepare test to be run, then
@@ -369,6 +370,32 @@ def execute_run(self, cmd, timeout):
        command.execute(timeout=timeout)
        return command

    def execute_post_run_script(self):
        """Execute the post run script if one was written for this test, then
        display its output, error, and exit code on the console."""

        if self.post_run_script:
            post_run = BuildTestCommand(self.post_run_script)
            post_run.execute()
            output = post_run.get_output()
            error = post_run.get_error()
            if len(output) >= 10:
                output = output[-10:]

            if len(error) >= 10:
                error = error[-10:]

            console.print(
                f"[blue]{self}[/]: Running Post Run Script: [cyan]{self.post_run_script}[/cyan]"
            )
            console.print(
                f"[blue]{self}[/]: Post run script exit code: {post_run.returncode()}"
            )

            console.rule(f"[blue]{self}[/]: Post Run Script Output")
            console.print(f"[blue]{' '.join(output)}")

            console.rule(f"[red]{self}[/]: Post Run Script Error")
            console.print(f"[red]{' '.join(error)}")

    def handle_run_result(self, command_result, timeout):
        """This method will handle the result of running the test. If the test is successful we will record the end time,
        copy output and error files to the test directory, and set state to complete. If the test fails we will retry the test based on the retry count.
@@ -377,8 +404,15 @@ def handle_run_result(self, command_result, timeout):
        launch_command = command_result.get_command()
        self.logger.debug(f"Running Test via command: {launch_command}")
        ret = command_result.returncode()
        output_msg = command_result.get_output()
        err_msg = command_result.get_error()

        if len(output_msg) >= 10:
            output_msg = output_msg[-10:]

        console.rule(f"[blue]Output Message for {self}")
        console.print(f"[blue]{' '.join(output_msg)}")

        if len(err_msg) >= 60:
            err_msg = err_msg[-60:]
        if not self._retry or ret == 0:
@@ -625,6 +659,27 @@ def _write_build_script(self, modules=None, modulepurge=None, unload_modules=Non
        self.build_script = dest
        self.metadata["build_script"] = self.build_script

    def _write_post_run_script(self):
        """This method will write the content of the post run script that is run after the test is complete.
        The post run script is used to perform cleanup operations after the test is complete.
        Upon creating the file we set its permission to 755 so the script can be run.
        """

        self.post_run_script = f"{os.path.join(self.stage_dir, self.name)}_postrun.sh"

        if not self.recipe.get("post_run"):
            return

        lines = ["#!/bin/bash -v"]
        lines += self.recipe["post_run"].split("\n")

        lines = "\n".join(lines)
        write_file(self.post_run_script, lines)
        self._set_execute_perm(self.post_run_script)
        console.print(
            f"[blue]{self}[/]: Writing Post Run Script: {self.post_run_script}"
        )
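
For reference, given the post_run section of tutorials/post_run.yml at the bottom of this diff, the script written by this method (post_run_example_postrun.sh in the stage directory) would look roughly like this, since the method prepends a #!/bin/bash -v shebang to the raw post_run lines:

#!/bin/bash -v
unlink $HOME/.bashrc_link
rmdir demo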

    def _write_test(self):
        """This method is responsible for invoking ``generate_script`` that
        formulates content of testscript which is implemented in each subclass.
@@ -1058,6 +1113,8 @@ def post_run_steps(self):
        # mark job as success if it finished all post run steps
        self.complete()

        self.execute_post_run_script()

    def is_valid_metric(self, name):
        if name not in list(self.metadata["metrics"].keys()):
            return False
3 changes: 3 additions & 0 deletions buildtest/schemas/script.schema.json
@@ -60,6 +60,9 @@
"run": {
"$ref": "definitions.schema.json#/definitions/run"
},
"post_run": {
"$ref": "definitions.schema.json#/definitions/run"
},
"status": {
"$ref": "definitions.schema.json#/definitions/status"
},
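Because ``post_run`` points at the same ``run`` definition, it accepts the same free-form block of shell commands. A minimal sketch of a script buildspec using it (the test name and commands here are illustrative, not taken from this PR):

buildspecs:
  cleanup_example:
    type: script
    executor: generic.local.bash
    description: illustrative post_run usage
    run: |
      mkdir scratch
    post_run: |
      rmdir scratch
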
1 change: 1 addition & 0 deletions buildtest/schemas/spack.schema.json
@@ -37,6 +37,7 @@
"type": "string",
"description": "Shell commands run after spack"
},
"post_run": { "$ref": "definitions.schema.json#/definitions/run" },
"needs": {
"$ref": "definitions.schema.json#/definitions/needs"
},
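The same property is now available for spack buildspecs. A hypothetical sketch, assuming the usual spack schema layout with an install section (field names other than post_run are assumptions, not taken from this PR):

buildspecs:
  install_zlib:
    type: spack
    executor: generic.local.bash
    description: illustrative post_run usage with spack
    spack:
      install:
        specs: ['zlib']
    post_run: |
      spack uninstall -y zlib
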
9 changes: 0 additions & 9 deletions docs/writing_buildspecs/comparison_operators.rst
@@ -33,7 +33,6 @@ from the regular expression search. The item must be non-negative number.
.. literalinclude:: ../tutorials/perf_checks/assert_ge.yml
   :language: yaml
   :emphasize-lines: 12-48
   :linenos:


buildtest will evaluate each assertion in the list and use a logical AND to determine the final
@@ -65,7 +64,6 @@ In this example, we perform a **>** operation, this can be done via ``assert_gt``
.. literalinclude:: ../tutorials/perf_checks/assert_gt.yml
   :language: yaml
   :emphasize-lines: 37-47
   :linenos:

.. _assert_le:

@@ -77,7 +75,6 @@ In this example, we perform a **<=** operation, this can be done via ``assert_le`
.. literalinclude:: ../tutorials/perf_checks/assert_le.yml
   :language: yaml
   :emphasize-lines: 37-47
   :linenos:

.. _assert_lt:

@@ -89,7 +86,6 @@ In this example, we perform a **<** operation, this can be done via ``assert_lt`
.. literalinclude:: ../tutorials/perf_checks/assert_lt.yml
   :language: yaml
   :emphasize-lines: 37-47
   :linenos:

.. _assert_eq:

@@ -104,7 +100,6 @@ type (``int``, ``float``, ``str``).
.. literalinclude:: ../tutorials/perf_checks/assert_eq.yml
   :language: yaml
   :emphasize-lines: 40-50
   :linenos:

This test is expected to pass where all assertions are **True**. Let's build the test and see the output

@@ -123,7 +118,6 @@ a mismatch in value captured by metric ``x`` which is **1** however the referenc
.. literalinclude:: ../tutorials/perf_checks/assert_eq_exceptions.yml
   :language: yaml
   :emphasize-lines: 22-23,28-29,33,42-43
   :linenos:

Let's build this test and see the output.

@@ -145,7 +139,6 @@ should pass. The reference value is converted to the data-type (``type`` field)
.. literalinclude:: ../tutorials/perf_checks/assert_ne.yml
   :language: yaml
   :emphasize-lines: 17,23,29,35,41-49
   :linenos:

We expect this test to pass. In order to run this test, you can do the following

@@ -166,7 +159,6 @@ is an example using the ``assert_range`` property with stream benchmark.
.. literalinclude:: ../tutorials/perf_checks/assert_range.yml
   :language: yaml
   :emphasize-lines: 37-51
   :linenos:

Let's build this test and see the output

@@ -192,7 +184,6 @@ string equivalent **'1'**.
.. literalinclude:: ../tutorials/perf_checks/contains.yml
   :language: yaml
   :emphasize-lines: 17-25,41-45
   :linenos:

You can run this test, by running the following command

2 changes: 2 additions & 0 deletions docs/writing_buildspecs/customize_shell.rst
@@ -88,6 +88,7 @@ with stdout stream.

.. literalinclude:: ../tutorials/shebang.yml
   :language: yaml
   :emphasize-lines: 5,17

Now let's run this test and see the following.

@@ -112,6 +113,7 @@ Here is a python example calculating area of circle

.. literalinclude:: ../tutorials/python-shell.yml
   :language: yaml
   :emphasize-lines: 5,8-13


.. note::
3 changes: 2 additions & 1 deletion docs/writing_buildspecs/multi_executor.rst
@@ -62,7 +62,7 @@ will override the ``sbatch`` property defined in the top-level file otherwise it

.. literalinclude:: ../tutorials/multi_executors/executor_scheduler.yml
   :language: yaml

   :emphasize-lines: 8-13

.. dropdown:: ``buildtest build -b tutorials/multi_executors/executor_scheduler.yml``

Expand All @@ -85,6 +85,7 @@ see :ref:`cray_burstbuffer_datawarp`.

.. literalinclude:: ../tutorials/burstbuffer_datawarp_executors.yml
   :language: yaml
   :emphasize-lines: 9-18

Custom Status by Executor
--------------------------
33 changes: 33 additions & 0 deletions docs/writing_buildspecs/status_check.rst
@@ -493,3 +493,36 @@ Let's try running this test example and see the generated output, all test shoul
.. dropdown:: ``buildtest build -b tutorials/test_status/specify_regex_type.yml``

   .. command-output:: buildtest build -b tutorials/test_status/specify_regex_type.yml

``post_run``: Specify Post Run Tests
-------------------------------------

Buildtest can run additional commands after test execution, specified via the ``post_run`` property. This can be used to perform cleanup or other
operations once the test has run. To demonstrate this, we have the following buildspec. In this test we create a directory named **demo** and
a symbolic link named **$HOME/.bashrc_link**. Notice that the ``status`` check verifies the existence of the directory and the symbolic link. This
is where the ``post_run`` script comes into play: we want to remove the created directory and symbolic link after the test. If we were to do this in the ``run`` section, the test
would fail since the **status** check would not find these files. The ``post_run`` property holds shell commands that are written to a separate script and executed in a bash shell.

.. literalinclude:: ../tutorials/post_run.yml
   :language: yaml
   :emphasize-lines: 6-16

Now we can build this test and see the output. Note that buildtest runs the post_run script after test execution and reports its exit code. Even if the
post run script fails to execute properly, it does not affect the actual test result.

.. dropdown:: ``buildtest build -b tutorials/post_run.yml``

   .. command-output:: buildtest build -b tutorials/post_run.yml

We can confirm the files are no longer present by running the following commands

.. command-output:: ls -l $HOME/.bashrc_link
   :shell:
   :returncode: 2

We can retrieve the full path to the stage directory via the ``buildtest path -s`` command, given the name of the test. The **demo** directory is created in the stage directory,
so if we run the following command we should see that this directory is not present.

.. command-output:: ls -l $(buildtest path -s post_run_example)/demo
   :shell:
   :returncode: 2
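
Since ``_write_post_run_script`` names the script ``<testname>_postrun.sh`` inside the stage directory, one way to inspect the generated script after a run would be a command along these lines (shown here as a plain shell sketch, not part of the original docs):

$ cat "$(buildtest path -s post_run_example)"/post_run_example_postrun.sh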
1 change: 1 addition & 0 deletions tests/builders/post_run.yml
8 changes: 8 additions & 0 deletions tests/builders/test_builders.py
@@ -234,3 +234,11 @@ def test_metrics_file_regex_with_invalid_linenum():
        configuration=config,
    )
    cmd.build()


def test_post_run_check():
    """This test runs a post_run script together with a status check"""
    cmd = BuildTest(
        buildspecs=[os.path.join(here, "post_run.yml")], configuration=config
    )
    cmd.build()
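
To run just this new regression test locally, something like the following should work, assuming a development install of buildtest with pytest available (the invocation is an assumption, not part of this PR):

$ pytest tests/builders/test_builders.py::test_post_run_check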
16 changes: 16 additions & 0 deletions tutorials/post_run.yml
@@ -0,0 +1,16 @@
buildspecs:
  post_run_example:
    type: script
    executor: generic.local.bash
    description: post run example that will remove symbolic link
    run: |
      ln -s $HOME/.bashrc $HOME/.bashrc_link
      mkdir demo
    post_run: |
      unlink $HOME/.bashrc_link
      rmdir demo
    status:
      is_dir:
        - demo
      is_symlink:
        - $HOME/.bashrc_link
