Coverage for mlos_bench/mlos_bench/tests/config/schemas/schedulers/test_scheduler_schemas.py: 100%
25 statements
« prev ^ index » next coverage.py v7.5.1, created at 2024-05-06 00:35 +0000
1#
2# Copyright (c) Microsoft Corporation.
3# Licensed under the MIT License.
4#
5"""
6Tests for schedulers schema validation.
7"""
9from os import path
11import pytest
13from mlos_core.tests import get_all_concrete_subclasses
15from mlos_bench.config.schemas import ConfigSchema
16from mlos_bench.schedulers.base_scheduler import Scheduler
18from mlos_bench.tests import try_resolve_class_name
19from mlos_bench.tests.config.schemas import (get_schema_test_cases,
20 check_test_case_against_schema,
21 check_test_case_config_with_extra_param)
# General testing strategy:
# - hand code a set of good/bad configs (useful to test editor schema checking)
# - enumerate and try to check that we've covered all the cases
# - for each config, load and validate against expected schema

# Load every hand-written test-case config from the adjacent "test-cases" directory.
TEST_CASES = get_schema_test_cases(path.join(path.dirname(__file__), "test-cases"))


# Dynamically enumerate some of the cases we want to make sure we cover.

expected_mlos_bench_scheduler_class_names = [
    f"{subclass.__module__}.{subclass.__name__}"
    for subclass in get_all_concrete_subclasses(Scheduler,  # type: ignore[type-abstract]
                                                pkg_name='mlos_bench')
]
assert expected_mlos_bench_scheduler_class_names
39# Do the full cross product of all the test cases and all the scheduler types.
# Do the full cross product of all the test cases and all the scheduler types.
@pytest.mark.parametrize("test_case_subtype", sorted(TEST_CASES.by_subtype))
@pytest.mark.parametrize("mlos_bench_scheduler_type", expected_mlos_bench_scheduler_class_names)
def test_case_coverage_mlos_bench_scheduler_type(test_case_subtype: str, mlos_bench_scheduler_type: str) -> None:
    """
    Checks to see if there is a given type of test case for the given mlos_bench scheduler type.
    """
    # At least one test case of this subtype must reference the scheduler class.
    covered = any(
        try_resolve_class_name(test_case.config.get("class")) == mlos_bench_scheduler_type
        for test_case in TEST_CASES.by_subtype[test_case_subtype].values()
    )
    if not covered:
        raise NotImplementedError(
            f"Missing test case for subtype {test_case_subtype} for Scheduler class {mlos_bench_scheduler_type}")
# Now we actually perform all of those validation tests.
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_path))
def test_scheduler_configs_against_schema(test_case_name: str) -> None:
    """
    Checks that the scheduler config validates against the schema.
    """
    test_case = TEST_CASES.by_path[test_case_name]
    # Each config must validate against both the dedicated and the unified schema.
    for schema in (ConfigSchema.SCHEDULER, ConfigSchema.UNIFIED):
        check_test_case_against_schema(test_case, schema)
@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_type["good"]))
def test_scheduler_configs_with_extra_param(test_case_name: str) -> None:
    """
    Checks that the scheduler config fails to validate if extra params are present in certain places.
    """
    test_case = TEST_CASES.by_type["good"][test_case_name]
    # Check the extra-param rejection against both the dedicated and the unified schema.
    for schema in (ConfigSchema.SCHEDULER, ConfigSchema.UNIFIED):
        check_test_case_config_with_extra_param(test_case, schema)
# Allow running this test module directly; "-n0" disables parallel test distribution.
if __name__ == "__main__":
    pytest.main([__file__, "-n0"])