amltk.optimization.metric #

The metric definition.

Metric dataclass #

Metric(
    name: str,
    *,
    minimize: bool = True,
    bounds: tuple[float, float] | None = None,
    fn: Callable[P, float] | None = None
)

Bases: Generic[P]

A metric with a given name, optimal direction, and possible bounds.
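
For illustration, two metrics might be declared as follows (a minimal sketch; the names and bounds are arbitrary):

```python
from amltk.optimization import Metric

# An unbounded metric where lower values are better (the default direction).
loss = Metric("loss", minimize=True)

# A bounded metric where higher values are better.
accuracy = Metric("accuracy", minimize=False, bounds=(0.0, 1.0))

print(loss)      # loss (minimize)
print(accuracy)  # accuracy [0.0, 1.0] (maximize)
```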

bounds class-attribute instance-attribute #

bounds: tuple[float, float] | None = field(
    kw_only=True, default=None
)

The bounds of the metric, if any.

fn class-attribute instance-attribute #

fn: Callable[P, float] | None = field(
    kw_only=True, default=None, compare=False
)

A function to attach to this metric to be used within a trial.

minimize class-attribute instance-attribute #

minimize: bool = field(kw_only=True, default=True)

Whether to minimize or maximize the metric.

name instance-attribute #

name: str

The name of the metric.

optimal property #

optimal: float

The optimal value of the metric.

worst property #

worst: float

The worst possible value of the metric.
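
For a bounded metric, the optimal value should be the bound in the direction of optimization and the worst the opposite bound (consistent with distance_to_optimal below). A minimal sketch, with the results marked as expected rather than guaranteed:

```python
from amltk.optimization import Metric

accuracy = Metric("accuracy", minimize=False, bounds=(0.0, 1.0))
print(accuracy.optimal)  # expected: 1.0 (the upper bound, since we maximize)
print(accuracy.worst)    # expected: 0.0 (the lower bound)
```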

Comparison #

Bases: str, Enum

The comparison between two metric values: BETTER, WORSE, or EQUAL.

__call__ #

__call__(*args: P.args, **kwargs: P.kwargs) -> float

Call the associated function with this metric.

Source code in src/amltk/optimization/metric.py
def __call__(self, *args: P.args, **kwargs: P.kwargs) -> float:
    """Call the associated function with this metric."""
    if self.fn is None:
        raise ValueError(
            f"Metric {self.name} does not have a function to call."
            " Please provide a function to `Metric(fn=...)` if you"
            " want to call this metric like this.",
        )
    return self.fn(*args, **kwargs)
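
A hypothetical sketch of attaching a function and calling the metric directly; misclassification_rate is an illustrative callable, not part of the library:

```python
from amltk.optimization import Metric

def misclassification_rate(y_true: list[int], y_pred: list[int]) -> float:
    # Illustrative scoring function: fraction of mismatched labels.
    return sum(t != p for t, p in zip(y_true, y_pred)) / len(y_true)

error = Metric("error", minimize=True, bounds=(0.0, 1.0), fn=misclassification_rate)
print(error([0, 1, 1, 0], [0, 1, 0, 0]))  # 0.25

# Without an attached fn, calling the metric raises a ValueError.
```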

as_scorer #

as_scorer(
    *,
    response_method: (
        SklearnResponseMethods
        | Sequence[SklearnResponseMethods]
        | None
    ) = None,
    **scorer_kwargs: Any
) -> _Scorer

Convert a metric to a sklearn scorer.

PARAMETER DESCRIPTION
response_method

The response method to use for the scorer. This can be a single method or an iterable of methods.

TYPE: SklearnResponseMethods | Sequence[SklearnResponseMethods] | None DEFAULT: None

scorer_kwargs

Additional keyword arguments to pass to the scorer during the call. Forwards to sklearn.metrics.make_scorer.

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
_Scorer

The sklearn scorer.

Source code in src/amltk/optimization/metric.py
def as_scorer(
    self,
    *,
    response_method: (
        SklearnResponseMethods | Sequence[SklearnResponseMethods] | None
    ) = None,
    **scorer_kwargs: Any,
) -> _Scorer:
    """Convert a metric to a sklearn scorer.

    Args:
        response_method: The response method to use for the scorer.
            This can be a single method or an iterable of methods.
        scorer_kwargs: Additional keyword arguments to pass to the
            scorer during the call. Forwards to [`sklearn.metrics.make_scorer`][].

    Returns:
        The sklearn scorer.
    """
    from sklearn.metrics import get_scorer, make_scorer

    match self.fn:
        case None:
            try:
                return get_scorer(self.name)
            except ValueError as e:
                raise ValueError(
                    f"Could not find scorer for {self.name}."
                    " Please provide a function to `Metric(fn=...)`.",
                ) from e
        case fn:
            return make_scorer(
                fn,
                greater_is_better=not self.minimize,
                response_method=response_method,
                **scorer_kwargs,
            )
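
A sketch of both branches, assuming scikit-learn is installed. With no fn, the metric name must resolve through sklearn.metrics.get_scorer (e.g. "accuracy"); with an fn, it is wrapped by sklearn.metrics.make_scorer:

```python
from amltk.optimization import Metric

# Name-based lookup: "accuracy" is a built-in sklearn scorer name.
acc = Metric("accuracy", minimize=False, bounds=(0.0, 1.0))
acc_scorer = acc.as_scorer()

# Function-based: an illustrative custom metric wrapped via make_scorer.
def squared_error(y_true, y_pred) -> float:
    return sum((t - p) ** 2 for t, p in zip(y_true, y_pred)) / len(y_true)

mse = Metric("mse", minimize=True, fn=squared_error)
mse_scorer = mse.as_scorer(response_method="predict")
```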

compare #

compare(v1: float, v2: float) -> Comparison

Check if v1 is better than v2.

Source code in src/amltk/optimization/metric.py
def compare(self, v1: float, v2: float) -> Metric.Comparison:
    """Check if `v1` is better than `v2`."""
    minimize = self.minimize
    if v1 == v2:
        return Metric.Comparison.EQUAL
    if v1 > v2:
        return Metric.Comparison.WORSE if minimize else Metric.Comparison.BETTER

    # v1 < v2
    return Metric.Comparison.BETTER if minimize else Metric.Comparison.WORSE
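
For illustration, a small sketch of compare with a minimized metric (values chosen arbitrarily):

```python
from amltk.optimization import Metric

loss = Metric("loss", minimize=True)

assert loss.compare(v1=0.3, v2=0.5) is Metric.Comparison.BETTER  # lower loss is better
assert loss.compare(v1=0.5, v2=0.3) is Metric.Comparison.WORSE
assert loss.compare(v1=0.5, v2=0.5) is Metric.Comparison.EQUAL
```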

distance_to_optimal #

distance_to_optimal(v: float) -> float

The distance to the optimal value, using the bounds if possible.

Source code in src/amltk/optimization/metric.py
def distance_to_optimal(self, v: float) -> float:
    """The distance to the optimal value, using the bounds if possible."""
    match self.bounds:
        case None:
            raise ValueError(
                f"Metric {self.name} is unbounded, can not compute distance"
                " to optimal.",
            )
        case (lower, upper) if lower <= v <= upper:
            if self.minimize:
                return abs(v - lower)
            return abs(v - upper)
        case (lower, upper):
            raise ValueError(f"Value {v} is not within {self.bounds=}")
        case _:
            raise ValueError(f"Invalid {self.bounds=}")

from_str classmethod #

from_str(s: str) -> Self

Create a metric from a str.

from amltk.optimization import Metric

s = "loss (minimize)"
metric = Metric.from_str(s)
print(metric)

s = "accuracy [0.0, 1.0] (maximize)"
metric = Metric.from_str(s)
print(metric)

Output:

loss (minimize)
accuracy [0.0, 1.0] (maximize)

PARAMETER DESCRIPTION
s

The string to parse.

TYPE: str

RETURNS DESCRIPTION
Self

The parsed metric.

Source code in src/amltk/optimization/metric.py
@classmethod
def from_str(cls, s: str) -> Self:
    """Create an metric from a str.

    ```python exec="true" source="material-block" result="python"
    from amltk.optimization import Metric

    s = "loss (minimize)"
    metric = Metric.from_str(s)
    print(metric)

    s = "accuracy [0.0, 1.0] (maximize)"
    metric = Metric.from_str(s)
    print(metric)
    ```

    Args:
        s: The string to parse.

    Returns:
        The parsed metric.
    """
    splits = s.split(" ")
    # No bounds
    if len(splits) == 2:  # noqa: PLR2004
        name, minimize_str = splits
        bounds = None
    else:
        name, lower_str, upper_str, minimize_str = splits
        bounds = (float(lower_str[1:-1]), float(upper_str[:-1]))

    minimize = minimize_str == "(minimize)"
    return cls(name=name, minimize=minimize, bounds=bounds)

loss #

loss(v: float) -> float

Convert a value to a loss.

Source code in src/amltk/optimization/metric.py
def loss(self, v: float, /) -> float:
    """Convert a value to a loss."""
    return float(v) if self.minimize else -float(v)

normalized_loss #

normalized_loss(v: float) -> float

The normalized loss of a value if possible.

If both sides of the bounds are finite, we can normalize the value to be between 0 and 1.

Source code in src/amltk/optimization/metric.py
def normalized_loss(self, v: float) -> float:
    """The normalized loss of a value if possible.

    If both sides of the bounds are finite, we can normalize the value
    to be between 0 and 1.
    """
    match self.bounds:
        # If both sides are finite, we can 0-1 normalize
        case (lower, upper) if not np.isinf(lower) and not np.isinf(upper):
            cost = (v - lower) / (upper - lower)
            cost = 1 - cost if self.minimize is False else cost
        # No bounds or one unbounded bound, we can't normalize
        case _:
            cost = v if self.minimize else -v

    return cost
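
A small sketch of both cases (a finitely bounded metric and an unbounded one):

```python
from amltk.optimization import Metric

accuracy = Metric("accuracy", minimize=False, bounds=(0.0, 1.0))
print(accuracy.normalized_loss(0.75))  # 0.25, i.e. 1 minus the 0-1 normalized value

unbounded = Metric("loss", minimize=True)
print(unbounded.normalized_loss(3.2))  # 3.2, no finite bounds, so just the loss
```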

score #

score(v: float) -> float

Convert a value to a score.

Source code in src/amltk/optimization/metric.py
def score(self, v: float, /) -> float:
    """Convert a value to a score."""
    return -float(v) if self.minimize else float(v)
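
Together, loss and score give a direction-agnostic view of a value: loss is always "lower is better" and score is always "higher is better". A small sketch:

```python
from amltk.optimization import Metric

loss_metric = Metric("loss", minimize=True)
print(loss_metric.loss(0.4))    # 0.4, already a loss, returned as-is
print(loss_metric.score(0.4))   # -0.4, negated so that higher is better

acc_metric = Metric("accuracy", minimize=False)
print(acc_metric.loss(0.9))     # -0.9, negated so that lower is better
print(acc_metric.score(0.9))    # 0.9
```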

MetricCollection dataclass #

MetricCollection(*, metrics: Mapping[str, Metric] = dict())

Bases: Mapping[str, Metric]

A collection of metrics.

metrics class-attribute instance-attribute #

metrics: Mapping[str, Metric] = field(default_factory=dict)

The metrics in this collection.

as_sklearn_scorer #

as_sklearn_scorer(
    *,
    response_methods: (
        Mapping[
            str,
            SklearnResponseMethods
            | Sequence[SklearnResponseMethods],
        ]
        | None
    ) = None,
    scorer_kwargs: (
        Mapping[str, Mapping[str, Any]] | None
    ) = None,
    raise_exc: bool = True
) -> _MultimetricScorer

Convert this collection to a sklearn scorer.

Source code in src/amltk/optimization/metric.py
def as_sklearn_scorer(
    self,
    *,
    response_methods: (
        Mapping[str, SklearnResponseMethods | Sequence[SklearnResponseMethods]]
        | None
    ) = None,
    scorer_kwargs: Mapping[str, Mapping[str, Any]] | None = None,
    raise_exc: bool = True,
) -> _MultimetricScorer:
    """Convert this collection to a sklearn scorer."""
    from sklearn.metrics._scorer import _MultimetricScorer

    rms = response_methods or {}
    skwargs = scorer_kwargs or {}

    scorers = {
        k: v.as_scorer(response_method=rms.get(k), **skwargs.get(k, {}))
        for k, v in self.items()
    }
    return _MultimetricScorer(scorers=scorers, raise_exc=raise_exc)
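
A sketch assuming scikit-learn is installed; metrics without an fn must use names that sklearn.metrics.get_scorer recognises:

```python
from amltk.optimization import Metric
from amltk.optimization.metric import MetricCollection

metrics = MetricCollection.from_collection(
    [
        Metric("accuracy", minimize=False, bounds=(0.0, 1.0)),
        Metric("balanced_accuracy", minimize=False, bounds=(0.0, 1.0)),
    ],
)
multi_scorer = metrics.as_sklearn_scorer()
```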

from_collection classmethod #

from_collection(
    metrics: (
        Metric | Iterable[Metric] | Mapping[str, Metric]
    )
) -> MetricCollection

Create a metric collection from a single metric, an iterable of metrics, or a mapping of names to metrics.

Source code in src/amltk/optimization/metric.py
@classmethod
def from_collection(
    cls,
    metrics: Metric | Iterable[Metric] | Mapping[str, Metric],
) -> MetricCollection:
    """Create a metric collection from an iterable of metrics."""
    match metrics:
        case Metric():
            return cls(metrics={metrics.name: metrics})
        case Mapping():
            return MetricCollection(metrics={m.name: m for m in metrics.values()})
        case Iterable():
            return cls(metrics={m.name: m for m in metrics})  # type: ignore
        case _:
            raise TypeError(
                f"Expected a Metric, Iterable[Metric], or Mapping[str, Metric]."
                f" Got {type(metrics)} instead.",
            )
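
For illustration, building a collection from a list of metrics (MetricCollection is imported from this module and behaves as a Mapping keyed by metric name):

```python
from amltk.optimization import Metric
from amltk.optimization.metric import MetricCollection

metrics = MetricCollection.from_collection(
    [
        Metric("accuracy", minimize=False, bounds=(0.0, 1.0)),
        Metric("loss", minimize=True),
    ],
)
print(list(metrics))             # ['accuracy', 'loss']
print(metrics["loss"].minimize)  # True
```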

from_empty classmethod #

from_empty() -> MetricCollection

Create an empty metric collection.

Source code in src/amltk/optimization/metric.py
@classmethod
def from_empty(cls) -> MetricCollection:
    """Create an empty metric collection."""
    return cls(metrics={})

optimums #

optimums() -> Mapping[str, float]

The optimal values of the metrics.

Source code in src/amltk/optimization/metric.py
def optimums(self) -> Mapping[str, float]:
    """The optimums of the metrics."""
    return {k: v.optimal for k, v in self.items()}

worsts #

worsts() -> Mapping[str, float]

The worst values of the metrics.

Source code in src/amltk/optimization/metric.py
def worsts(self) -> Mapping[str, float]:
    """The worsts of the metrics."""
    return {k: v.worst for k, v in self.items()}
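
As a sketch for a collection of bounded metrics (the results marked as expected follow from each metric's optimal and worst properties):

```python
from amltk.optimization import Metric
from amltk.optimization.metric import MetricCollection

metrics = MetricCollection.from_collection(
    [
        Metric("accuracy", minimize=False, bounds=(0.0, 1.0)),
        Metric("error", minimize=True, bounds=(0.0, 1.0)),
    ],
)
print(metrics.optimums())  # expected: {'accuracy': 1.0, 'error': 0.0}
print(metrics.worsts())    # expected: {'accuracy': 0.0, 'error': 1.0}
```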