Multi-objective model

smac.model.multi_objective_model #

MultiObjectiveModel #

MultiObjectiveModel(
    models: AbstractModel | list[AbstractModel],
    objectives: list[str],
    seed: int = 0,
)

Bases: AbstractModel

Wrapper around one or more surrogate models to predict multiple objectives.

Parameters#

models : AbstractModel | list[AbstractModel]
    Which model should be used. If a list is given, it must contain as many models as there are objectives. If a single model is given, the same model is used for all objectives.
objectives : list[str]
    Which objectives should be used.
seed : int, defaults to 0

Source code in smac/model/multi_objective_model.py
def __init__(
    self,
    models: AbstractModel | list[AbstractModel],
    objectives: list[str],
    seed: int = 0,
) -> None:
    self._n_objectives = len(objectives)
    if isinstance(models, list):
        assert len(models) == len(objectives)

        # Make sure the configspace is the same
        configspace = models[0]._configspace
        for m in models:
            assert configspace == m._configspace

        self._models = models
    else:
        configspace = models._configspace
        # A single model was given: the same instance is reused for every objective.
        self._models = [models for _ in range(self._n_objectives)]

    super().__init__(
        configspace=configspace,
        instance_features=None,
        pca_components=None,
        seed=seed,
    )
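
A minimal construction sketch. Hedged: the RandomForest import path, its configspace argument, and the dict-style ConfigurationSpace constructor are assumptions about the surrounding SMAC and ConfigSpace APIs, not taken from this page.

from ConfigSpace import ConfigurationSpace

from smac.model.multi_objective_model import MultiObjectiveModel
from smac.model.random_forest import RandomForest  # assumed import path

# One hyperparameter "x" in [0, 1] (dict shortcut assumed from ConfigSpace).
cs = ConfigurationSpace({"x": (0.0, 1.0)})

# One surrogate per objective; all models must share the same configspace.
model = MultiObjectiveModel(
    models=[RandomForest(cs), RandomForest(cs)],
    objectives=["runtime", "accuracy"],
    seed=0,
)

Passing a single model instead of a list reuses that same instance for every objective.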

meta property #

meta: dict[str, Any]

Returns the meta data of the created object.

models property #

models: list[AbstractModel]

The internally used surrogate models.

predict #

predict(
    X: ndarray, covariance_type: str | None = "diagonal"
) -> tuple[ndarray, ndarray | None]

Predicts mean and variance for a given X. Internally, calls the method _predict.

Parameters#

X : np.ndarray [#samples, #hyperparameters + #features]
    Input data points.
covariance_type : str | None, defaults to "diagonal"
    Specifies what to return along with the mean. Applied only to Gaussian Processes. Takes four valid inputs:
    * None: Only the mean is returned.
    * "std": Standard deviation at test points is returned.
    * "diagonal": Diagonal of the covariance matrix is returned.
    * "full": Whole covariance matrix between the test points is returned.

Returns#

means : np.ndarray [#samples, #objectives]
    The predictive mean.
vars : np.ndarray [#samples, #objectives] or [#samples, #samples] | None
    Predictive variance or standard deviation.

Source code in smac/model/abstract_model.py
def predict(
    self,
    X: np.ndarray,
    covariance_type: str | None = "diagonal",
) -> tuple[np.ndarray, np.ndarray | None]:
    """Predicts mean and variance for a given X. Internally, calls the method `_predict`.

    Parameters
    ----------
    X : np.ndarray [#samples, #hyperparameters + #features]
        Input data points.
    covariance_type: str | None, defaults to "diagonal"
        Specifies what to return along with the mean. Applied only to Gaussian Processes.
        Takes four valid inputs:
        * None: Only the mean is returned.
        * "std": Standard deviation at test points is returned.
        * "diagonal": Diagonal of the covariance matrix is returned.
        * "full": Whole covariance matrix between the test points is returned.

    Returns
    -------
    means : np.ndarray [#samples, #objectives]
        The predictive mean.
    vars : np.ndarray [#samples, #objectives] or [#samples, #samples] | None
        Predictive variance or standard deviation.
    """
    if len(X.shape) != 2:
        raise ValueError("Expected 2d array, got %dd array!" % len(X.shape))

    if X.shape[1] != self._n_hps + self._n_features:
        raise ValueError(
            f"Feature mismatch: X should have {self._n_hps} hyperparameters + {self._n_features} features, "
            f"but has {X.shape[1]} in total."
        )

    if self._apply_pca:
        try:
            X_feats = X[:, -self._n_features :]
            X_feats = self._scaler.transform(X_feats)
            X_feats = self._pca.transform(X_feats)
            X = np.hstack((X[:, : self._n_hps], X_feats))
        except NotFittedError:
            # PCA not fitted if only one training sample
            pass

    if X.shape[1] != len(self._types):
        raise ValueError("Rows in X should have %d entries but have %d!" % (len(self._types), X.shape[1]))

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", "Predicted variances smaller than 0. Setting those variances to 0.")
        mean, var = self._predict(X, covariance_type)

    if len(mean.shape) == 1:
        mean = mean.reshape((-1, 1))

    if var is not None and len(var.shape) == 1:
        var = var.reshape((-1, 1))

    return mean, var
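
As a quick shape check, a sketch assuming the two-objective model from the construction example above has already been trained (a training sketch follows at the end of this page):

import numpy as np

# Two query points, one hyperparameter, no instance features.
X = np.array([[0.25], [0.75]])

# covariance_type="diagonal" is the default; it only affects Gaussian processes.
means, variances = model.predict(X)

# The wrapper stacks per-objective predictions column-wise,
# so the expected shape here is (2, 2): two samples, two objectives.
print(means.shape)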

train #

train(X: ndarray, Y: ndarray) -> Self

Trains the model on X and Y. Internally, calls the method _train.

Parameters#

X : np.ndarray [#samples, #hyperparameters + #features]
    Input data points.
Y : np.ndarray [#samples, #objectives]
    The corresponding target values.

Returns#

self : AbstractModel

Source code in smac/model/abstract_model.py
def train(self: Self, X: np.ndarray, Y: np.ndarray) -> Self:
    """Trains the random forest on X and Y. Internally, calls the method `_train`.

    Parameters
    ----------
    X : np.ndarray [#samples, #hyperparameters + #features]
        Input data points.
    Y : np.ndarray [#samples, #objectives]
        The corresponding target values.

    Returns
    -------
    self : AbstractModel
    """
    if len(X.shape) != 2:
        raise ValueError("Expected 2d array, got %dd array!" % len(X.shape))

    if X.shape[1] != self._n_hps + self._n_features:
        raise ValueError(
            f"Feature mismatch: X should have {self._n_hps} hyperparameters + {self._n_features} features, "
            f"but has {X.shape[1]} in total."
        )

    if X.shape[0] != Y.shape[0]:
        raise ValueError("X.shape[0] ({}) != y.shape[0] ({})".format(X.shape[0], Y.shape[0]))

    # Reduce the dimensionality of the instance features if there are at least self._pca_components of them
    if (
        self._pca_components is not None
        and X.shape[0] > self._pca.n_components
        and self._n_features >= self._pca_components
    ):
        X_feats = X[:, -self._n_features :]

        # Scale features
        X_feats = self._scaler.fit_transform(X_feats)
        X_feats = np.nan_to_num(X_feats)  # if features with max == min

        # PCA
        X_feats = self._pca.fit_transform(X_feats)
        X = np.hstack((X[:, : self._n_hps], X_feats))

        if hasattr(self, "_types"):
            # For RF, adapt the types list.
            # If X_feats.shape[0] < self._pca.n_components, then X_feats.shape[1] == X_feats.shape[0].
            self._types = np.array(
                np.hstack((self._types[: self._n_hps], np.zeros(X_feats.shape[1]))),
                dtype=np.uint,
            )  # type: ignore

        self._apply_pca = True
    else:
        self._apply_pca = False

        if hasattr(self, "_types"):
            self._types = copy.deepcopy(self._initial_types)

    return self._train(X, Y)
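
Tying it together, a hedged end-to-end sketch with synthetic data; Y carries one column per objective, in the order the objectives were passed to the constructor:

import numpy as np

rng = np.random.default_rng(0)

X = rng.random((50, 1))  # [#samples, #hyperparameters], no instance features
Y = np.column_stack(
    (
        X[:, 0] ** 2,  # first objective, e.g. "runtime"
        1.0 - X[:, 0],  # second objective, e.g. "accuracy"
    )
)

# By the wrapper's design, each internal model is fit on its own objective column.
model.train(X, Y)
means, variances = model.predict(X[:5])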