Testing

The `m` package provides several utilities to help us test our actions. We can start with the following example:

import pytest
from m.github.actions import Action
from m.testing import ActionStepTestCase as TCase
from m.testing import run_action_test_case
from pytest_mock import MockerFixture

from pkg.actions import actions


@pytest.mark.parametrize(
    'tcase',
    [
        TCase(
            name='test_id',
            py_file='src/pkg/main.py',
            inputs={
                'INPUT_ARG_A': 'val_a',
                'INPUT_ARG_B': 'val_b',
            },
            expected_stdout='Anything we print to stdout',
            outputs=['some-output=some_value'],
        ),
    ],
    ids=lambda tcase: tcase.name,
)
def test_m_gh_actions_api(tcase: TCase, mocker: MockerFixture) -> None:
    run_action_test_case(mocker, tcase)


def test_actions_instance() -> None:
    assert isinstance(actions, Action)
    assert actions.name == 'Action Name'

Important

This is not required, but it is recommended to add the following to the `__init__.py` at the root of the tests.

from m.testing import block_m_side_effects, block_network_access

block_m_side_effects()
block_network_access()

This makes sure that our tests do not make any calls to the internet and prevents our code from writing files, forcing us to create mocks instead.
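For example, with `m.core.rw.write_file` blocked, a test that exercises file-writing code has to patch it explicitly. A minimal sketch (the code under test is hypothetical):

```py
from m.core.fp import Good
from pytest_mock import MockerFixture


def test_writes_report(mocker: MockerFixture) -> None:
    # Patch the blocked function; the list side effect yields one
    # successful result per expected write.
    write_mock = mocker.patch('m.core.rw.write_file')
    write_mock.side_effect = [Good(0)]
    # ... run the hypothetical code under test, then assert on the
    # recorded calls via write_mock.call_args_list
```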

Testing API

ActionStepTestCase

Bases: BaseModel

Defines a test case for an action.

Useful in the parametrization of several use cases for an action.

Attributes:

| Name | Type | Description |
| --- | --- | --- |
| `name` | `str` | Unique name for the test case. This is used as the identifier for a test case so that test failures may be easier to spot. |
| `py_file` | `str` | Path to the python file to run. |
| `inputs` | `dict[str, str]` | Inputs to the script. Should be of the form `{'INPUT_[SOME_NAME]': 'value'}`. |
| `exit_code` | `int` | The expected exit code (default: `0`). |
| `expected_stdout` | `str` | The expected stdout (default: empty). |
| `errors` | `list[str]` | Errors may be noisy; specify an array of strings that are expected to be in stderr. |
| `outputs` | `list[str]` | List of Github outputs. Each entry in the array should be of the form `output-name=output-value`. |
| `file_write_side_effect` | `Any \| None` | Defaults to `Good(0)`. This can be provided if we need to modify the behavior of `m.core.rw.write_file`. This is useful if we want to test cases in which a file failed to write. |
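For instance, a test case that exercises a failed file write could look like the sketch below (reusing the `TCase` alias from the earlier example; the path, the error text, and the `Bad` constructor, assumed to be the error counterpart of `Good` in `m.core.fp`, are illustrative assumptions):

```py
from m.core.fp import Bad  # assumption: the error counterpart of Good

TCase(
    name='fails_to_write_output',
    py_file='src/pkg/main.py',        # hypothetical path
    inputs={'INPUT_ARG_A': 'val_a'},
    exit_code=1,
    errors=['unable to write'],       # hypothetical stderr substring
    file_write_side_effect=[Bad('unable to write')],
)
```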

Source code in m/testing/testing.py
class ActionStepTestCase(BaseModel):
    """Defines a test case for an action.

    Useful in the parametrization of several use cases for an action.
    """

    name: str = Field(description="""
        Unique name for the test case. This is used as the identifier
        for a test case so that test failures may be easier to spot.
    """)

    py_file: str = Field(description='Path to the python file to run.')

    inputs: dict[str, str] = Field(description="""
        Inputs to the script. Should be of the form

        ```py
        {'INPUT_[SOME_NAME]': 'value'}
        ```
    """)

    exit_code: int = Field(
        default=0,
        description='The expected exit code (default: 0).',
    )

    expected_stdout: str = Field(
        default='',
        description='The expected stdout (default: empty).',
    )

    # Errors may be noisy, specify strings that are expected to be in stderr
    errors: list[str] = Field(
        default=[],
        description="""
            Errors may be noisy, specify strings that are expected to be in stderr
            in an array of strings.
        """,
    )

    # list of outputs: `output-name=output-value`
    outputs: list[str] = Field(
        default=[],
        description="""
            list of Github outputs. Each entry in the array should be of the form

            ```
            output-name=output-value
            ```
        """,
    )

    file_write_side_effect: Any | None = Field(
        default=None,
        description="""
            Defaults to [`Good(0)`][m.core.fp.Good]. This can be provided if we
            need to modify the behavior of [m.core.rw.write_file][]. This is
            useful if we want to test cases in which a file failed to write.
        """,
    )

block_m_side_effects()

Blocks functions that have side effects.

This function overrides parts of `m` so that we do not accidentally try to write a lot of files or create/move directories.

The `pathlib.Path.mkdir` function should only be blocked while developing tests. It is a reminder that we haven't mocked the function yet. If we want this reminder, run `touch m/.m/pytest-ran` after the tests run locally.

Returns:

| Type | Description |
| --- | --- |
| `dict[str, Any]` | A dictionary with references to the original functions that were overridden in case these are needed. |
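Since the original functions are returned, a test that genuinely needs one of them can restore it. A minimal sketch, using the `shutil_move` key seen in the source below:

```py
import shutil

from m.testing import block_m_side_effects

originals = block_m_side_effects()
# Restore the real implementation if a specific test truly needs it.
shutil.move = originals['shutil_move']
```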

Source code in m/testing/testing.py
def block_m_side_effects() -> dict[str, Any]:
    """Blocks functions that have side effects.

    This function overrides the definition of `m` so that we do not accidentally
    try write a lot of files or create/move directories.

    The [pathlib.Path.mkdir][] function should only be blocked while developing
    tests. It is a reminder that we haven't mocked the function yet. If we want
    to get this reminder then add `touch m/.m/pytest-ran` after the tests run
    locally.

    Returns:
        A dictionary with references to the original functions that were
            overridden in case these are needed.
    """
    import shutil
    import subprocess  # noqa: S404 - importing to disable it during testing
    from pathlib import Path

    from m.core import rw as mio

    originals = {
        'write_file': mio.write_file,
        'Path_mkdir': Path.mkdir,
        'shutil_move': shutil.move,
    }

    mio.write_file = mock('m.core.rw.write_file')
    subprocess.check_output = mock('m.core.subprocess.eval_cmd')
    subprocess.call = mock('m.core.subprocess.exec_pnpm')
    shutil.move = mock('shutil.move')

    if not os.environ.get('CI'):
        # We want to make sure that we do not create directories during tests.
        # To do so we will mock the Path.mkdir function. There is a problem:
        # pytest needs this function to create directories for its own purposes.
        # For this reason we will only mock the function after we create the
        # m/.m/pytest-ran file.
        if Path('m/.m/pytest-ran').exists():
            Path.mkdir = mock('pathlib.Path.mkdir')  # type: ignore

    return originals

block_network_access()

Blocks network access for all tests.

This function overrides the definition of `socket.socket` so that we do not accidentally run tests that make network calls. If our tests do not depend on other local services, it is a good idea to call this before any of our tests run.

Otherwise we may want to modify this function to allow certain hosts to be called (PRs welcome).

Source code in m/testing/testing.py
def block_network_access() -> None:
    """Blocks network access for all tests.

    This function overrides the definition of [`socket`][socket.socket] so that
    we do not accidentally try to run tests that make network calls. If our tests
    do not depend on other local services it is a good idea to call this before
    any of our tests runs.

    Otherwise we may want to modify this function to allow certain hosts to be
    called. (PRs welcomed).
    """
    # making sure that no calls to the internet are done
    socket.socket = BlockNetwork  # type: ignore

mock(func_name)

Create a function that raises an error if it is not mocked.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `func_name` | `str` | Full module path to the function to mock. | *required* |

Returns:

| Type | Description |
| --- | --- |
| `Any` | A function that raises an error if it is not mocked. |
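As the source below shows, `mock` returns a partial application of `needs_mocking`: the resulting function raises an error whenever it is called without being patched first. A sketch with a hypothetical module:

```py
import pkg.io  # hypothetical module with side-effectful functions

# Any test that reaches pkg.io.upload_artifact without mocking it
# will now fail loudly instead of performing the real side effect.
pkg.io.upload_artifact = mock('pkg.io.upload_artifact')
```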

Source code in m/testing/testing.py
def mock(func_name: str) -> Any:
    """Create a function that raises an error if its not mocked.

    Args:
        func_name: full module path to the function to mock.

    Returns:
        A function that raises an error if its not mocked.
    """
    return partial(needs_mocking, func_name)

run_action_step(mocker, *, py_file, exit_code, env_vars, file_write_side_effect=None)

Execute an action step in a test.

This function expects the inputs to the script to be provided via environment variables of the form `INPUT_[SOME_NAME]`. The script will write the outputs to the file `FAKE_GITHUB_OUTPUT.txt`. We can verify the contents of the file by looking at the third output from the function. This is a dictionary mapping file names to contents. Please note that this testing function mocks `m.core.rw.write_file` to obtain the file contents.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `mocker` | `MockerFixture` | A reference to the pytest `MockerFixture`. | *required* |
| `py_file` | `str` | The full path to the file that Github Actions will run. | *required* |
| `exit_code` | `int` | The expected exit code of the action. `0` means all is good. | *required* |
| `env_vars` | `dict[str, str]` | A dictionary of the environment variables that the action will receive. | *required* |
| `file_write_side_effect` | `Any \| None` | This can be provided if we need to modify the behavior of `m.core.rw.write_file`. This is useful if we want to test cases in which a file failed to write. | `None` |

Returns:

| Type | Description |
| --- | --- |
| `tuple[str, str, dict[str, str]]` | The standard out, standard error, and files written by `m.core.rw.write_file`. |
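When the assertions baked into `run_action_test_case` are not enough, we can call `run_action_step` directly. A sketch, assuming `run_action_step` is importable from `m.testing` like the other helpers, and using hypothetical paths and inputs:

```py
from m.testing import run_action_step  # assumed to be exported here
from pytest_mock import MockerFixture


def test_with_custom_assertions(mocker: MockerFixture) -> None:
    stdout, _stderr, file_writes = run_action_step(
        mocker,
        py_file='src/pkg/main.py',
        exit_code=0,
        env_vars={'INPUT_ARG_A': 'val_a'},
    )
    # The third output maps file names to their written contents.
    gh_output = file_writes['FAKE_GITHUB_OUTPUT.txt']
    assert 'some-output=' in gh_output
```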

Source code in m/testing/conftest.py
def run_action_step(
    mocker: MockerFixture,
    *,
    py_file: str,
    exit_code: int,
    env_vars: dict[str, str],
    file_write_side_effect: Any | None = None,
) -> tuple[str, str, dict[str, str]]:
    """Execute an action step in a test.

    This function expects the inputs to the script to be provided via environment
    variables of the form `INPUT_[SOME_NAME]`. The script will write the outputs
    to the file `FAKE_GITHUB_OUTPUT.txt`. We can verify the contents of the file
    by looking at the 3rd output from the function. This is a dictionary mapping
    file names to contents. Please note that this testing function mocks
    [m.core.rw.write_file][] to obtain the file contents.

    Args:
        mocker: A reference to the pytest `MockerFixture`.
        py_file: The full path to the file that Github Actions will run.
        exit_code: The expected exit code of the action. `0` means all is good.
        env_vars: A dictionary of the environment variables that the action will
            receive.
        file_write_side_effect: This can be provided if we need to modify the
            behavior of [m.core.rw.write_file][]. This is useful if we want to
            test cases in which a file failed to write.

    Returns:
        The standard out, standard error, and files written by [m.core.rw.write_file][].
    """
    mocker.patch.dict(
        os.environ,
        {
            'NO_COLOR': 'true',
            **env_vars,
            'GITHUB_OUTPUT': 'FAKE_GITHUB_OUTPUT.txt',
        },
        clear=True,
    )

    std_out = StringIO()
    std_err = StringIO()
    mocker.patch.object(sys, 'stdout', std_out)
    mocker.patch.object(sys, 'stderr', std_err)
    file_write_mock = mocker.patch('m.core.rw.write_file')
    file_write_mock.side_effect = file_write_side_effect or [Good(0)]

    prog = None
    with pytest.raises(SystemExit) as prog_block:
        prog = prog_block
        runpy.run_path(py_file, {}, '__main__')

    # Would be nice to be able to reset via a the mocker
    sys.stdout = sys.__stdout__
    sys.stderr = sys.__stderr__
    assert prog is not None  # noqa: S101 - to be used in testing
    file_writes = {
        call.args[0]: call.args[1]
        for call in file_write_mock.call_args_list
    }

    # next block should not be covered by coverage, we have this as a utility
    # to help us write tests.
    prog_code = prog.value.code
    if prog_code != exit_code:  # pragma: no cover
        print(std_out.getvalue(), file=sys.stdout)  # noqa: WPS421
        print(std_err.getvalue(), file=sys.stderr)  # noqa: WPS421
    assert prog_code == exit_code   # noqa: S101 - to be used in testing
    return std_out.getvalue(), std_err.getvalue(), file_writes

run_action_test_case(mocker, tcase)

Execute an action step test case.

This is a convenience wrapper to help us run an action test case. If we need more control over the assertions we can copy and modify the implementation.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `mocker` | `MockerFixture` | A reference to the pytest `MockerFixture`. | *required* |
| `tcase` | `ActionStepTestCase` | The test case. | *required* |
Source code in m/testing/conftest.py
def run_action_test_case(
    mocker: MockerFixture,
    tcase: ActionStepTestCase,
) -> None:
    """Execute an action step test case.

    This is a commodity wrapper to help us run the action tests case. If we need
    more control over the assertions we can then copy and modify the implementation.

    Args:
        mocker: A reference to the pytest `MockerFixture`.
        tcase: The test case.
    """
    stdout, stderr, file_writes = run_action_step(
        mocker,
        py_file=tcase.py_file,
        exit_code=tcase.exit_code,
        env_vars=tcase.inputs,
        file_write_side_effect=tcase.file_write_side_effect,
    )
    assert stdout == tcase.expected_stdout  # noqa: S101
    if tcase.errors:
        for error in tcase.errors:
            assert error in stderr  # noqa: S101

    if tcase.exit_code == 0:
        assert 'FAKE_GITHUB_OUTPUT.txt' in file_writes  # noqa: S101
        gh_output = file_writes['FAKE_GITHUB_OUTPUT.txt']
        assert gh_output == '\n'.join(tcase.outputs)  # noqa: S101