Skip to content

result

class Result
dataclass
#

Bases: ABC, Generic[C, F]

Collect all results in a class for clarity.

fidelity: F
attr
#

The fidelity of this result.

config: C
classvar attr
#

The config used to generate this result.

score: float
abstractmethod prop
#

The score of interest.

error: float
abstractmethod prop
#

The error of interest.

test_score: float
abstractmethod prop
#

The score on the test set.

test_error: float
abstractmethod prop
#

The error on the test set.

val_score: float
abstractmethod prop
#

The score on the validation set.

val_error: float
abstractmethod prop
#

The error on the validation set.

cost: float
abstractmethod prop
#

The time cost for evaluating this config.

def from_dict(config, result, fidelity)
classmethod
#

Create from a dict or mapping object.

Source code in src/mfpbench/result.py
@classmethod
def from_dict(
    cls,
    config: C,
    result: Mapping[str, Any],
    fidelity: F,
) -> Self:
    """Create from a dict or mapping object.

    Raises:
        ValueError: If `result` is missing any of this result's fields.
    """
    required = set(cls.names())
    missing = required - result.keys()
    if missing:
        raise ValueError(
            f"Result dict is missing fields: {missing}",
        )
    # Some sources hand back np.float subclasses; coerce anything that is
    # a float to a plain builtin float so serialization stays clean.
    values = {
        key: float(value) if isinstance(value, float) else value
        for key, value in result.items()
        if key in required
    }
    return cls(config=config, fidelity=fidelity, **values)

def names()
classmethod
#

The names of the fields in this result.

Source code in src/mfpbench/result.py
@classmethod
def names(cls) -> tuple[str, ...]:
    """The names of the fields in this result."""
    # config and fidelity are bookkeeping, not result values.
    skip = ("config", "fidelity")
    return tuple(field.name for field in fields(cls) if field.name not in skip)

def from_row(config, row, fidelity)
classmethod
#

Create from a row of a dataframe.

Source code in src/mfpbench/result.py
@classmethod
def from_row(
    cls,
    config: C,
    row: Mapping[str, Any],
    fidelity: F,
) -> Self:
    """Create from a row of a dataframe.

    Materializes the row as a plain dict and delegates to `from_dict`.
    """
    return cls.from_dict(config=config, result=dict(row), fidelity=fidelity)

def dict() #

Create a dict from this result.

Source code in src/mfpbench/result.py
def dict(self) -> dict[str, Any]:
    """Create a dict from this result.

    The config and fidelity entries are dropped; only result fields remain.
    """
    return {
        key: value
        for key, value in asdict(self).items()
        if key not in ("config", "fidelity")
    }

class GenericTabularResult
dataclass
#

Bases: Result[C, F], Generic[C, F]

A generic tabular result.

This is useful for adhoc tabular benchmarks.

score: float
prop
#

The score of interest.

error: float
prop
#

The error of interest.

test_score: float
prop
#

The score on the test set.

test_error: float
prop
#

The error on the test set.

val_score: float
prop
#

The score on the validation set.

val_error: float
prop
#

The error on the validation set.

cost: float
prop
#

The time cost for evaluating this config.

def __hash__() #

Hash based on the dictionary repr.

Source code in src/mfpbench/result.py
def __hash__(self) -> int:
    """Hash based on the dictionary repr."""
    # XOR-combine the config, the fidelity and the (ordered) value items.
    combined = 0
    for component in (self.config, self.fidelity, tuple(self._values.items())):
        combined ^= hash(component)
    return combined

def dict() #

As a raw dictionary.

Source code in src/mfpbench/result.py
def dict(self) -> Any:
    """As a raw dictionary.

    Returns a shallow copy so callers cannot mutate the stored values.
    """
    return {key: value for key, value in self._values.items()}

def from_dict(config, result, fidelity)
classmethod
#

Create from a dict or mapping object.

Source code in src/mfpbench/result.py
@override
@classmethod
def from_dict(cls, config: C, result: Mapping[str, Any], fidelity: F) -> Self:
    """Create from a dict or mapping object."""
    return cls(config=config, _values=dict(result), fidelity=fidelity)

def names()
classmethod
#

The names of the fields in this result.

Source code in src/mfpbench/result.py
@classmethod
def names(cls) -> tuple[str, ...]:
    """The names of the fields in this result.

    Excludes the bookkeeping fields: the config, the fidelity and the
    raw tabular value store.
    """
    # BUG FIX: the exclusion previously listed "__values", but the backing
    # dataclass field is named "_values" (it is set via `_values=...` in
    # `from_dict` and read as `self._values` elsewhere), so the raw value
    # store leaked into the reported field names.
    return tuple(
        f.name
        for f in fields(cls)
        if f.name not in ("config", "fidelity", "_values")
    )