Coverage for mlos_bench/mlos_bench/tests/storage/exp_load_test.py: 100%
72 statements
« prev ^ index » next coverage.py v7.5.1, created at 2024-05-06 00:35 +0000
« prev ^ index » next coverage.py v7.5.1, created at 2024-05-06 00:35 +0000
1#
2# Copyright (c) Microsoft Corporation.
3# Licensed under the MIT License.
4#
5"""
6Unit tests for the storage subsystem.
7"""
8from datetime import datetime, tzinfo
9from typing import Optional
11from pytz import UTC
13import pytest
15from mlos_bench.environments.status import Status
16from mlos_bench.tunables.tunable_groups import TunableGroups
17from mlos_bench.storage.base_storage import Storage
18from mlos_bench.tests import ZONE_INFO
def test_exp_load_empty(exp_storage: Storage.Experiment) -> None:
    """
    Loading from a fresh (empty) experiment storage must yield no data at all.
    """
    # Every component of the loaded tuple (trial_ids, configs, scores, status)
    # should be an empty collection.
    for loaded_part in exp_storage.load():
        assert not loaded_part
def test_exp_pending_empty(exp_storage: Storage.Experiment) -> None:
    """
    An empty experiment storage must report no pending trials.
    """
    pending = exp_storage.pending_trials(datetime.now(UTC), running=True)
    assert len(list(pending)) == 0
@pytest.mark.parametrize(("zone_info"), ZONE_INFO)
def test_exp_trial_pending(exp_storage: Storage.Experiment,
                           tunable_groups: TunableGroups,
                           zone_info: Optional[tzinfo]) -> None:
    """
    A freshly created trial must show up as the (only) pending trial.
    """
    new_trial = exp_storage.new_trial(tunable_groups)
    # Unpacking asserts that exactly one trial is pending.
    (pending_trial,) = exp_storage.pending_trials(datetime.now(zone_info), running=True)
    assert pending_trial.trial_id == new_trial.trial_id
    assert pending_trial.tunables == tunable_groups
@pytest.mark.parametrize(("zone_info"), ZONE_INFO)
def test_exp_trial_pending_many(exp_storage: Storage.Experiment,
                                tunable_groups: TunableGroups,
                                zone_info: Optional[tzinfo]) -> None:
    """
    Start THREE trials (two sharing the same config) and check that all
    three are reported as pending.
    """
    config1 = tunable_groups.copy().assign({'idle': 'mwait'})
    config2 = tunable_groups.copy().assign({'idle': 'noidle'})
    submitted_ids = set()
    # Submit config2 twice on purpose: duplicate configs still get distinct trial IDs.
    for config in (config1, config2, config2):
        submitted_ids.add(exp_storage.new_trial(config).trial_id)
    pending_ids = {
        pending.trial_id
        for pending in exp_storage.pending_trials(datetime.now(zone_info), running=True)
    }
    assert len(pending_ids) == 3
    assert pending_ids == submitted_ids
@pytest.mark.parametrize(("zone_info"), ZONE_INFO)
def test_exp_trial_pending_fail(exp_storage: Storage.Experiment,
                                tunable_groups: TunableGroups,
                                zone_info: Optional[tzinfo]) -> None:
    """
    Start a trial, mark it FAILED, and check that it is no longer pending.
    """
    failed_trial = exp_storage.new_trial(tunable_groups)
    failed_trial.update(Status.FAILED, datetime.now(zone_info))
    remaining = list(exp_storage.pending_trials(datetime.now(zone_info), running=True))
    assert not remaining
@pytest.mark.parametrize(("zone_info"), ZONE_INFO)
def test_exp_trial_success(exp_storage: Storage.Experiment,
                           tunable_groups: TunableGroups,
                           zone_info: Optional[tzinfo]) -> None:
    """
    Start a trial, finish it successfully, and check that it is no longer pending.
    """
    finished_trial = exp_storage.new_trial(tunable_groups)
    finished_trial.update(Status.SUCCEEDED, datetime.now(zone_info), 99.9)
    remaining = list(exp_storage.pending_trials(datetime.now(zone_info), running=True))
    assert not remaining
@pytest.mark.parametrize(("zone_info"), ZONE_INFO)
def test_exp_trial_update_categ(exp_storage: Storage.Experiment,
                                tunable_groups: TunableGroups,
                                zone_info: Optional[tzinfo]) -> None:
    """
    Record a trial result with multiple metrics, one of which is categorical,
    and check that everything is loaded back as strings.
    """
    trial = exp_storage.new_trial(tunable_groups)
    trial.update(Status.SUCCEEDED, datetime.now(zone_info), {"score": 99.9, "benchmark": "test"})
    (trial_ids, configs, scores, status) = exp_storage.load()
    assert trial_ids == [trial.trial_id]
    # Config values and scores come back stringified.
    assert configs == [{
        'idle': 'halt',
        'kernel_sched_latency_ns': '2000000',
        'kernel_sched_migration_cost_ns': '-1',
        'vmSize': 'Standard_B4ms'
    }]
    assert scores == [{"score": "99.9", "benchmark": "test"}]
    assert status == [Status.SUCCEEDED]
@pytest.mark.parametrize(("zone_info"), ZONE_INFO)
def test_exp_trial_update_twice(exp_storage: Storage.Experiment,
                                tunable_groups: TunableGroups,
                                zone_info: Optional[tzinfo]) -> None:
    """
    Updating the status of an already-finalized trial must raise an error.
    """
    trial = exp_storage.new_trial(tunable_groups)
    trial.update(Status.FAILED, datetime.now(zone_info))
    # The second update (deliberately with a UTC timestamp) must be rejected.
    with pytest.raises(RuntimeError):
        trial.update(Status.SUCCEEDED, datetime.now(UTC), 99.9)
@pytest.mark.parametrize(("zone_info"), ZONE_INFO)
def test_exp_trial_pending_3(exp_storage: Storage.Experiment,
                             tunable_groups: TunableGroups,
                             zone_info: Optional[tzinfo]) -> None:
    """
    Start THREE trials: one succeeds, one fails, one stays untouched.
    The untouched trial must remain pending; the other two must be loadable
    into the optimizer with the proper scores and statuses.
    """
    score = 99.9

    trial_fail = exp_storage.new_trial(tunable_groups)
    trial_succ = exp_storage.new_trial(tunable_groups)
    trial_pend = exp_storage.new_trial(tunable_groups)

    trial_fail.update(Status.FAILED, datetime.now(zone_info))
    trial_succ.update(Status.SUCCEEDED, datetime.now(zone_info), score)

    # Only the untouched trial should still be pending (UTC query on purpose).
    (pending,) = exp_storage.pending_trials(datetime.now(UTC), running=True)
    assert pending.trial_id == trial_pend.trial_id

    (trial_ids, configs, scores, status) = exp_storage.load()
    assert trial_ids == [trial_fail.trial_id, trial_succ.trial_id]
    assert len(configs) == 2
    # Failed trials load with no score; successful scores come back stringified.
    assert scores == [None, {"score": f"{score}"}]
    assert status == [Status.FAILED, Status.SUCCEEDED]
    # Round-trip: loaded configs must reproduce the original tunables.
    for loaded_config, src_trial in zip(configs, (trial_fail, trial_succ)):
        assert tunable_groups.copy().assign(loaded_config).reset() == src_trial.tunables