Metric

pydantic settings olive.evaluator.metric.Metric[source]
field name: str [Required]
field type: MetricType [Required]
field backend: str | None = 'torch_metrics'
field sub_types: List[SubMetric] [Required]
field user_config: ConfigBase | None = None
field data_config: DataConfig | None = None
get_inference_settings(framework)[source]
get_run_kwargs() → Dict[str, Any][source]
get_sub_type_info(info_name, no_priority_filter=True, callback=<function Metric.<lambda>>)[source]
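
A minimal sketch of building a Metric from a plain dict, the way a metric is usually declared in an Olive workflow config. The keys accepted inside each sub_types entry come from SubMetric (not documented here), so the "priority" key below is an assumption; data_config and user_config are omitted.

    from olive.evaluator.metric import Metric

    # Metric entry as it might appear under "metrics" in a workflow config.
    # The sub_types entry shape ("name", "priority") is an assumption; see
    # SubMetric for the authoritative fields.
    config = {
        "name": "accuracy",
        "type": "accuracy",            # a MetricType value
        "backend": "torch_metrics",    # default backend, per the field listing above
        "sub_types": [{"name": "accuracy_score", "priority": 1}],
    }
    metric = Metric(**config)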

MetricType

class olive.evaluator.metric.MetricType(value)[source]

An enumeration of the supported metric types.

ACCURACY = 'accuracy'
CUSTOM = 'custom'
LATENCY = 'latency'
THROUGHPUT = 'throughput'
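
The string values above are what a workflow config stores in a metric's type field. A quick sketch of moving between those strings and the enum members (standard Enum behavior):

    from olive.evaluator.metric import MetricType

    # Look a member up by its config string.
    assert MetricType("latency") is MetricType.LATENCY

    # Enumerate every valid "type" string.
    valid_types = [t.value for t in MetricType]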

AccuracySubType

class olive.evaluator.metric.AccuracySubType(value)[source]

An enumeration of the sub-metric names available for accuracy metrics.

ACCURACY_SCORE = 'accuracy_score'
AUROC = 'auroc'
F1_SCORE = 'f1_score'
PERPLEXITY = 'perplexity'
PRECISION = 'precision'
RECALL = 'recall'
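
These members are the valid sub_type names for an accuracy metric. A sketch of an accuracy metric that reports two of them; as before, the "priority" key on each sub_types entry is an assumption about SubMetric's fields:

    from olive.evaluator.metric import AccuracySubType, Metric, MetricType

    accuracy_metric = Metric(
        name="classification_quality",
        type=MetricType.ACCURACY,
        sub_types=[
            {"name": AccuracySubType.ACCURACY_SCORE.value, "priority": 1},
            {"name": AccuracySubType.F1_SCORE.value, "priority": 2},
        ],
    )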

LatencySubType

class olive.evaluator.metric.LatencySubType(value)[source]

An enumeration of the statistics available for latency metrics: average, minimum, maximum, and several percentiles.

AVG = 'avg'
MAX = 'max'
MIN = 'min'
P50 = 'p50'
P75 = 'p75'
P90 = 'p90'
P95 = 'p95'
P99 = 'p99'
P999 = 'p999'
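
Each member names a statistic computed over the measured latencies. A sketch of a latency metric that tracks the average plus two tail percentiles (the sub_types entry shape is again an assumption):

    from olive.evaluator.metric import LatencySubType, Metric, MetricType

    latency_metric = Metric(
        name="latency",
        type=MetricType.LATENCY,
        sub_types=[
            {"name": LatencySubType.AVG.value, "priority": 1},
            {"name": LatencySubType.P90.value},
            {"name": LatencySubType.P99.value},
        ],
    )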

ThroughputSubType

class olive.evaluator.metric.ThroughputSubType(value)[source]

An enumeration of the statistics available for throughput metrics; the values mirror LatencySubType.

AVG = 'avg'
MAX = 'max'
MIN = 'min'
P50 = 'p50'
P75 = 'p75'
P90 = 'p90'
P95 = 'p95'
P99 = 'p99'
P999 = 'p999'
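
Throughput metrics use the same statistic names as latency metrics. A small sketch that lists the valid sub_type names programmatically:

    from olive.evaluator.metric import ThroughputSubType

    # Valid sub_type names for a throughput metric.
    print(sorted(s.value for s in ThroughputSubType))
    # ['avg', 'max', 'min', 'p50', 'p75', 'p90', 'p95', 'p99', 'p999']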

MetricGoal

pydantic settings olive.evaluator.metric.MetricGoal[source]
field type: str [Required]
field value: float [Required]
has_regression_goal()[source]
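
A sketch of a goal that tolerates a bounded regression; the "percent-max-degradation" goal type string and the exact semantics of has_regression_goal() are assumptions based on typical Olive workflow configs:

    from olive.evaluator.metric import MetricGoal

    # Allow the sub-metric to degrade by at most 1% relative to the baseline
    # (goal type string assumed; check MetricGoal's validators for valid values).
    goal = MetricGoal(type="percent-max-degradation", value=1.0)
    if goal.has_regression_goal():
        print(f"allowing up to {goal.value}% degradation")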