#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""Test helpers for mlos_bench.environments."""

from datetime import datetime
from typing import Any

import pytest

from mlos_bench.environments.base_environment import Environment
from mlos_bench.environments.status import Status
from mlos_bench.tunables.tunable_groups import TunableGroups
from mlos_bench.tunables.tunable_types import TunableValue


def check_env_success(
    env: Environment,
    tunable_groups: TunableGroups,
    *,
    expected_results: dict[str, TunableValue] | None,
    expected_telemetry: list[tuple[datetime, str, Any]],
    expected_status_run: set[Status] | None = None,
    expected_status_next: set[Status] | None = None,
    global_config: dict | None = None,
) -> None:
28 """
29 Set up an environment and run a test experiment there.
31 Parameters
32 ----------
33 tunable_groups : TunableGroups
34 Tunable parameters (usually come from a fixture).
35 env : Environment
36 An environment to query for the results.
37 expected_results : dict[str, float]
38 Expected results of the benchmark.
39 expected_telemetry : list[tuple[datetime, str, Any]]
40 Expected telemetry data of the benchmark.
41 expected_status_run : set[Status]
42 Expected status right after the trial.
43 Default is the `SUCCEEDED` value.
44 expected_status_next : set[Status]
45 Expected status values for the next trial.
46 Default is the same set as in `.is_good()`.
47 global_config : dict
48 Global params.
49 """
    # pylint: disable=too-many-arguments
    if expected_status_run is None:
        expected_status_run = {Status.SUCCEEDED}

    if expected_status_next is None:
        # Same set of "good" states as checked by `Status.is_good()`.
        expected_status_next = {
            Status.PENDING,
            Status.READY,
            Status.RUNNING,
            Status.SUCCEEDED,
        }

    with env as env_context:

        assert env_context.setup(tunable_groups, global_config)

        (status, _ts, data) = env_context.run()
        assert status in expected_status_run
        if expected_results is None:
            assert data is None
        else:
            assert data == pytest.approx(expected_results, nan_ok=True)

        (status, _ts, telemetry) = env_context.status()
        assert status in expected_status_next
        assert telemetry == pytest.approx(expected_telemetry, nan_ok=True)

        env_context.teardown()
        assert not env_context._is_ready  # pylint: disable=protected-access
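
# A minimal usage sketch (hypothetical test and fixture names, not part of this
# module): a test would typically invoke this helper with environment and
# tunable-group fixtures, e.g.:
#
#     def test_mock_env(mock_env: Environment, tunable_groups: TunableGroups) -> None:
#         check_env_success(
#             mock_env,
#             tunable_groups,
#             expected_results={"score": 75.0},
#             expected_telemetry=[],
#         )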


def check_env_fail_telemetry(env: Environment, tunable_groups: TunableGroups) -> None:
    """
    Set up a local environment and run a test experiment there; make sure that the
    environment's `.status()` call fails.

    Parameters
    ----------
    env : Environment
        An environment to query for the results.
    tunable_groups : TunableGroups
        Tunable parameters (usually come from a fixture).
    """
    with env as env_context:

        assert env_context.setup(tunable_groups)
        (status, _ts, _data) = env_context.run()
        assert status.is_succeeded()

        with pytest.raises(ValueError):
            env_context.status()
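
# A minimal usage sketch (hypothetical test and fixture names): exercising a
# telemetry parsing failure with this helper might look like:
#
#     def test_local_env_wrong_results(
#         local_env: Environment, tunable_groups: TunableGroups
#     ) -> None:
#         check_env_fail_telemetry(local_env, tunable_groups)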