#!/usr/bin/env python3
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
6"""
7Helper script to run the benchmark and store the results and telemetry in CSV files.
9This is a sample script that demonstrates how to produce the benchmark results
10and telemetry in the format that MLOS expects.
12THIS IS A TOY EXAMPLE. The script does not run any actual benchmarks and produces fake
13data for demonstration purposes. Please copy and extend it to suit your needs.
15Run:
16 ./bench_run.py ./output-metrics.csv ./output-telemetry.csv`
17"""

import argparse
from datetime import datetime, timedelta

import pandas


def _main(output_metrics: str, output_telemetry: str) -> None:
    # Some fake const data that we can check in the unit tests.
    # Our unit tests expect the `score` metric to be present in the output.
    df_metrics = pandas.DataFrame(
        [
            {"metric": "score", "value": 123.4},  # A copy of `total_time`
            {"metric": "total_time", "value": 123.4},
            {"metric": "latency", "value": 9.876},
            {"metric": "throughput", "value": 1234567},
        ]
    )
    df_metrics.to_csv(output_metrics, index=False)
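
    # For reference, with `index=False` the metrics file written above should
    # look like this (pandas upcasts the mixed int/float `value` column to
    # float, so `throughput` comes out as 1234567.0):
    #
    #   metric,value
    #   score,123.4
    #   total_time,123.4
    #   latency,9.876
    #   throughput,1234567.0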

    # Timestamps are const so we can check them in the tests.
    timestamp = datetime(2024, 10, 25, 13, 45)
    ts_delta = timedelta(seconds=30)

    df_telemetry = pandas.DataFrame(
        [
            {"timestamp": timestamp, "metric": "cpu_load", "value": 0.1},
            {"timestamp": timestamp, "metric": "mem_usage", "value": 20.0},
            {"timestamp": timestamp + ts_delta, "metric": "cpu_load", "value": 0.6},
            {"timestamp": timestamp + ts_delta, "metric": "mem_usage", "value": 33.0},
            {"timestamp": timestamp + 2 * ts_delta, "metric": "cpu_load", "value": 0.5},
            {"timestamp": timestamp + 2 * ts_delta, "metric": "mem_usage", "value": 31.0},
        ]
    )
    df_telemetry.to_csv(output_telemetry, index=False)
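
    # Likewise, the telemetry file should hold one row per (timestamp, metric)
    # pair, with timestamps in pandas' default format, e.g.:
    #
    #   timestamp,metric,value
    #   2024-10-25 13:45:00,cpu_load,0.1
    #   2024-10-25 13:45:00,mem_usage,20.0
    #   2024-10-25 13:45:30,cpu_load,0.6
    #   ...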


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Run the benchmark and save the results in CSV files."
    )
    parser.add_argument("output_metrics", help="CSV file to save the benchmark results to.")
    parser.add_argument("output_telemetry", help="CSV file to save the telemetry data to.")
    args = parser.parse_args()
    _main(args.output_metrics, args.output_telemetry)