Coverage for mlos_bench/mlos_bench/tests/config/schemas/globals/test_globals_schemas.py: 100%
10 statements
coverage.py v7.6.7, created at 2024-11-22 01:18 +0000
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
5"""Tests for CLI schema validation."""

from os import path

import pytest

from mlos_bench.config.schemas import ConfigSchema
from mlos_bench.tests.config.schemas import (
    check_test_case_against_schema,
    get_schema_test_cases,
)

# General testing strategy:
# - hand code a set of good/bad configs (useful to test editor schema checking)
# - for each config, load and validate against expected schema

TEST_CASES = get_schema_test_cases(path.join(path.dirname(__file__), "test-cases"))
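
# Illustrative note (assumed layout, hypothetical file names, not verified here):
# get_schema_test_cases() is expected to return a collection whose ``by_path`` dict
# maps each hand-written config file under "test-cases" to a test case object
# carrying its test_case_type ("good" or "bad"), roughly:
#
#   TEST_CASES.by_path = {
#       ".../test-cases/good/some-globals.jsonc": <test case, test_case_type="good">,
#       ".../test-cases/bad/some-globals.jsonc":  <test case, test_case_type="bad">,
#   }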

# Now we actually perform all of those validation tests.


@pytest.mark.parametrize("test_case_name", sorted(TEST_CASES.by_path))
def test_globals_configs_against_schema(test_case_name: str) -> None:
29 """Checks that the CLI config validates against the schema."""
    check_test_case_against_schema(TEST_CASES.by_path[test_case_name], ConfigSchema.GLOBALS)
    if TEST_CASES.by_path[test_case_name].test_case_type != "bad":
        # The unified schema has a hard time rejecting bad configs, so we skip it for them.
        # The trouble is that tunable-values, cli, and globals configs all look like flat
        # dicts with only minor constraints, so adding or removing params doesn't
        # invalidate the config against every one of those config types.
        check_test_case_against_schema(TEST_CASES.by_path[test_case_name], ConfigSchema.UNIFIED)
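
To make the skipped unified-schema check above concrete, here is a minimal, hypothetical sketch of the underlying issue (the schema fragments, config contents, and names below are illustrative assumptions, not the actual mlos_bench schemas): a flat dict that a strict globals-style schema rejects can still satisfy a union-style unified schema, because it also looks like another flat-dict config type.

    import jsonschema

    # Hypothetical, simplified schemas: the globals-like schema forbids unknown keys,
    # while the unified-like schema is a union (anyOf) of several flat-dict config types.
    globals_like = {
        "type": "object",
        "properties": {"experiment_id": {"type": "string"}},
        "additionalProperties": False,
    }
    tunable_values_like = {
        "type": "object",
        "additionalProperties": {"type": ["string", "number", "boolean"]},
    }
    unified_like = {"anyOf": [globals_like, tunable_values_like]}

    config = {"experiment_id": "exp1", "unexpected_param": 42}

    # Rejected by the strict globals-like schema because of the extra key...
    try:
        jsonschema.validate(config, globals_like)
    except jsonschema.ValidationError:
        print("rejected by the globals-like schema")

    # ...but accepted by the unified-like union, since the same flat dict also
    # matches the tunable-values-like branch.
    jsonschema.validate(config, unified_like)
    print("accepted by the unified-like schema")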