Coverage for mlos_bench/mlos_bench/optimizers/one_shot_optimizer.py: 100%

17 statements  

« prev     ^ index     » next       coverage.py v7.8.0, created at 2025-04-01 00:52 +0000

#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
No-op optimizer for mlos_bench that proposes a single configuration.

Explicit configs (partial or full) are possible using configuration files.

Examples
--------
Load tunables from a JSON string.
Note: normally these would be automatically loaded from the
:py:mod:`~mlos_bench.environments.base_environment.Environment`'s
``include_tunables`` config parameter.

>>> import json5 as json
>>> from mlos_bench.environments.status import Status
>>> from mlos_bench.services.config_persistence import ConfigPersistenceService
>>> service = ConfigPersistenceService()
>>> json_config = '''
... {
...   "group_1": {
...     "cost": 1,
...     "params": {
...       "colors": {
...         "type": "categorical",
...         "values": ["red", "blue", "green"],
...         "default": "green",
...       },
...       "int_param": {
...         "type": "int",
...         "range": [1, 3],
...         "default": 2,
...       },
...       "float_param": {
...         "type": "float",
...         "range": [0, 1],
...         "default": 0.5,
...         // Quantize the range into 3 bins
...         "quantization_bins": 3,
...       }
...     }
...   }
... }
... '''
>>> tunables = service.load_tunables(jsons=[json_config])
>>> # Check the defaults:
>>> tunables.get_param_values()
{'colors': 'green', 'int_param': 2, 'float_param': 0.5}

Load a JSON config of some tunable values to explicitly test.
Normally these would be provided by the :py:mod:`mlos_bench.run` CLI's
``--tunable-values`` option.

>>> tunable_values_json = '''
... {
...   "colors": "red",
...   "int_param": 1,
...   "float_param": 0.0
... }
... '''
>>> tunable_values = json.loads(tunable_values_json)
>>> tunables.assign(tunable_values).get_param_values()
{'colors': 'red', 'int_param': 1, 'float_param': 0.0}
>>> assert not tunables.is_defaults()

Now create a OneShotOptimizer from a JSON config string.

>>> optimizer_json_config = '''
... {
...   "class": "mlos_bench.optimizers.one_shot_optimizer.OneShotOptimizer",
... }
... '''
>>> config = json.loads(optimizer_json_config)
>>> optimizer = service.build_optimizer(
...   tunables=tunables,
...   service=service,
...   config=config,
... )

Run the optimizer.

>>> # Note that it will only run for a single iteration and return the values we set.
>>> while optimizer.not_converged():
...     suggestion = optimizer.suggest()
...     print(suggestion.get_param_values())
{'colors': 'red', 'int_param': 1, 'float_param': 0.0}
"""

90 

import logging

from mlos_bench.optimizers.mock_optimizer import MockOptimizer
from mlos_bench.services.base_service import Service
from mlos_bench.tunables.tunable_groups import TunableGroups

# Module-level logger, per the standard logging convention.
_LOG = logging.getLogger(__name__)

98 

99 

class OneShotOptimizer(MockOptimizer):
    """
    No-op optimizer that proposes a single configuration and returns.

    Explicit configs (partial or full) are possible using configuration files.
    """

    def __init__(
        self,
        tunables: TunableGroups,
        config: dict,
        global_config: dict | None = None,
        service: Service | None = None,
    ):
        """
        Create a one-shot optimizer over the given tunables.

        Parameters
        ----------
        tunables : TunableGroups
            Tunable parameters to (trivially) optimize over.
        config : dict
            Optimizer configuration (possibly including explicit tunable values).
        global_config : dict | None
            Optional global configuration overrides.
        service : Service | None
            Optional parent service for config loading, etc.
        """
        super().__init__(tunables, config, global_config, service)
        # Cap the run at a single suggestion, regardless of configuration.
        self._max_suggestions = 1
        _LOG.info("Run a single iteration for: %s", self._tunables)

    def suggest(self) -> TunableGroups:
        """Always produce the same (initial) suggestion."""
        suggestion = super().suggest()
        # Re-arm so any subsequent call keeps yielding the initial configuration.
        self._start_with_defaults = True
        return suggestion

    @property
    def supports_preload(self) -> bool:
        # One-shot runs have no prior data worth preloading.
        return False