Coverage for mlos_bench/mlos_bench/tests/config/schemas/schedulers/test_scheduler_schemas.py: 100%


#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Tests for schedulers schema validation.
"""

from os import path

import pytest

from mlos_core.tests import get_all_concrete_subclasses

from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.schedulers.base_scheduler import Scheduler

from mlos_bench.tests import try_resolve_class_name
from mlos_bench.tests.config.schemas import (get_schema_test_cases,
                                             check_test_case_against_schema,
                                             check_test_case_config_with_extra_param)


# General testing strategy:
# - hand code a set of good/bad configs (useful to test editor schema checking)
# - enumerate and try to check that we've covered all the cases
# - for each config, load and validate against expected schema

TEST_CASES = get_schema_test_cases(path.join(path.dirname(__file__), "test-cases"))
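
# For illustration only: a hypothetical "good" test case file (the path, file
# name, and config values below are assumptions for the example, not taken from
# the repo) might look like:
#
#   test-cases/good/full/sync_scheduler_full.jsonc:
#   {
#       "class": "mlos_bench.schedulers.SyncScheduler",
#       "config": {
#           "trial_config_repeat_count": 3
#       }
#   }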


# Dynamically enumerate some of the cases we want to make sure we cover.

expected_mlos_bench_scheduler_class_names = [subclass.__module__ + "." + subclass.__name__
                                             for subclass in get_all_concrete_subclasses(Scheduler,  # type: ignore[type-abstract]
                                                                                         pkg_name='mlos_bench')]
assert expected_mlos_bench_scheduler_class_names
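
# For example, this list should contain fully qualified names along the lines of
# "mlos_bench.schedulers.sync_scheduler.SyncScheduler" (assuming that concrete
# subclass is present in the installed mlos_bench package).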


# Do the full cross product of all the test cases and all the scheduler types.


@pytest.mark.parametrize("test_case_subtype", sorted(TEST_CASES.by_subtype))
@pytest.mark.parametrize("mlos_bench_scheduler_type", expected_mlos_bench_scheduler_class_names)
def test_case_coverage_mlos_bench_scheduler_type(test_case_subtype: str, mlos_bench_scheduler_type: str) -> None:
    """
    Checks that at least one test case of the given subtype exists for the given mlos_bench scheduler type.
    """
    for test_case in TEST_CASES.by_subtype[test_case_subtype].values():
        if try_resolve_class_name(test_case.config.get("class")) == mlos_bench_scheduler_type:
            return
    raise NotImplementedError(
        f"Missing test case for subtype {test_case_subtype} for Scheduler class {mlos_bench_scheduler_type}")
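
# Note: the subtypes are assumed to mirror the test-cases/ directory layout
# (e.g., good/full, good/partial, bad/invalid, bad/unhandled), so the cross
# product above enforces that each concrete Scheduler subclass has at least
# one test case of every subtype.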

# Now we actually perform all of those validation tests.


@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_path))
def test_scheduler_configs_against_schema(test_case_name: str) -> None:
    """
    Checks that the scheduler config validates against the schema.
    """
    check_test_case_against_schema(TEST_CASES.by_path[test_case_name], ConfigSchema.SCHEDULER)
    check_test_case_against_schema(TEST_CASES.by_path[test_case_name], ConfigSchema.UNIFIED)
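
# Each config is validated against both the scheduler-specific schema and the
# unified schema, since the unified schema is expected to accept any valid
# component config as well.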


@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_type["good"]))
def test_scheduler_configs_with_extra_param(test_case_name: str) -> None:
    """
    Checks that the scheduler config fails to validate if extra params are present in certain places.
    """
    check_test_case_config_with_extra_param(TEST_CASES.by_type["good"][test_case_name], ConfigSchema.SCHEDULER)
    check_test_case_config_with_extra_param(TEST_CASES.by_type["good"][test_case_name], ConfigSchema.UNIFIED)
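
# The helper is presumed to inject an unrecognized extra property into an
# otherwise "good" config and assert that validation then fails, i.e., that
# the schemas reject additional properties where they should.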


if __name__ == "__main__":
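    # Run only this file's tests; "-n0" disables pytest-xdist parallelism so
    # they execute in a single process (assumes pytest-xdist is installed).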

    pytest.main([__file__, "-n0"])