
neps.optimizers.multi_fidelity.mf_bo #

FreezeThawModel #

FreezeThawModel(
    pipeline_space,
    surrogate_model: str = "deep_gp",
    surrogate_model_args: dict = None,
)

Designed to work with model-based search in unit-step multi-fidelity algorithms.

Source code in neps/optimizers/multi_fidelity/mf_bo.py
def __init__(
    self,
    pipeline_space,
    surrogate_model: str = "deep_gp",
    surrogate_model_args: dict = None,
):
    self.observed_configs = None
    self.pipeline_space = pipeline_space
    self.surrogate_model_name = surrogate_model
    self.surrogate_model_args = (
        surrogate_model_args if surrogate_model_args is not None else {}
    )
    if self.surrogate_model_name in ["deep_gp", "pfn"]:
        self.surrogate_model_args.update({"pipeline_space": pipeline_space})

    # instantiate the surrogate model
    self.surrogate_model = instance_from_map(
        SurrogateModelMapping,
        self.surrogate_model_name,
        name="surrogate model",
        kwargs=self.surrogate_model_args,
    )
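
The constructor above injects the search space into the surrogate's keyword arguments for the `deep_gp` and `pfn` surrogates before instantiation. Below is a minimal, self-contained sketch of that argument handling; the helper name `resolve_surrogate_args` is hypothetical and not part of NePS:

```python
# Hypothetical helper mirroring the constructor logic above; not NePS API.
def resolve_surrogate_args(pipeline_space, surrogate_model="deep_gp", surrogate_model_args=None):
    args = dict(surrogate_model_args or {})  # copy so the caller's dict is not mutated
    # "deep_gp" and "pfn" surrogates need the search space to build their input encoding
    if surrogate_model in ("deep_gp", "pfn"):
        args["pipeline_space"] = pipeline_space
    return args

print(resolve_surrogate_args("space", "pfn"))  # {'pipeline_space': 'space'}
```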

MFBOBase #

Designed to work with model-based search on SH-based multi-fidelity algorithms.

Requires certain strict assumptions about fidelities and rung maps.
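
For intuition, SH-based algorithms map each rung to a geometrically increasing fidelity. The sketch below is illustrative only; `make_rung_map` and the `eta` spacing are assumptions about the shape of such a map, not the NePS implementation:

```python
# Illustrative sketch (not NePS code): a successive-halving-style rung map
# where each rung's fidelity grows by a factor `eta` up to max_budget.
def make_rung_map(min_budget: int, max_budget: int, eta: int = 3) -> dict[int, int]:
    rung_map, budget, rung = {}, min_budget, 0
    while budget <= max_budget:
        rung_map[rung] = budget
        budget *= eta
        rung += 1
    return rung_map

print(make_rung_map(1, 27))  # {0: 1, 1: 3, 2: 9, 3: 27}
```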

is_init_phase #

is_init_phase() -> bool

Returns True if in the warmstart phase and False under model-based search.

Source code in neps/optimizers/multi_fidelity/mf_bo.py
def is_init_phase(self) -> bool:
    """Returns True is in the warmstart phase and False under model-based search."""
    if self.modelling_type == "rung":
        # build a model per rung or per fidelity
        # in this case, the initial design checks if `init_size` number of
        # configurations have finished at a rung or not and the highest such rung is
        # chosen for model building at the current iteration
        if self._active_rung() is None:
            return True
    elif self.modelling_type == "joint":
        # builds a model across all fidelities with the fidelity as a dimension
        # in this case, calculate the total number of function evaluations spent
        # and in vanilla BO fashion use that to compare with the initial design size
        resources = calc_total_resources_spent(self.observed_configs, self.rung_map)
        resources /= self.max_budget
        if resources < self.init_size:
            return True
    else:
        raise ValueError("Choice of modelling_type not in {'rung', 'joint'}")
    return False
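
A standalone sketch of the `joint` branch above, with illustrative names (`observed_fidelities` is a stand-in for the resource accounting NePS performs over `observed_configs`): the total fidelity spent, normalized by `max_budget`, is compared against the initial design size.

```python
# Minimal sketch of the "joint" init-phase check; names are assumptions.
def joint_init_phase(observed_fidelities, max_budget, init_size):
    # total resources spent, expressed in full-budget equivalents
    resources = sum(observed_fidelities) / max_budget
    # still warm-starting until `init_size` full-budget equivalents are spent
    return resources < init_size

print(joint_init_phase([1, 3, 9], max_budget=27, init_size=1))  # True (13/27 < 1)
```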

sample_new_config #

sample_new_config(rung: int = None, **kwargs)

Samples a configuration from the model policy, the sampling policy, or at random.

Source code in neps/optimizers/multi_fidelity/mf_bo.py
def sample_new_config(
    self,
    rung: int = None,
    **kwargs,
):
    """Samples configuration from policies or random."""
    if self.model_based and not self.is_init_phase():
        incumbent = None
        if self.modelling_type == "rung":
            # `rung` should not be None when not in init phase
            active_max_rung = self._active_rung()
            fidelity = None
            active_max_fidelity = self.rung_map[active_max_rung]
        elif self.modelling_type == "joint":
            fidelity = self.rung_map[rung]
            active_max_fidelity = None
            # IMPORTANT step for correct 2-step acquisition
            incumbent = min(self.rung_histories[rung]["perf"])
        else:
            fidelity = active_max_fidelity = None
        assert (
            (fidelity is None and active_max_fidelity is not None)
            or (active_max_fidelity is None and fidelity is not None)
            or (active_max_fidelity is not None and fidelity is not None)
        ), "Either condition needs to be not None!"
        config = self.model_policy.sample(
            active_max_fidelity=active_max_fidelity,
            fidelity=fidelity,
            incumbent=incumbent,
            **self.sampling_args,
        )
    elif self.sampling_policy is not None:
        config = self.sampling_policy.sample(**self.sampling_args)
    else:
        config = self.pipeline_space.sample(
            patience=self.patience,
            user_priors=self.use_priors,
            ignore_fidelity=True,
        )
    return config
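
The dispatch order above can be summarized as a short sketch (the policy objects here are stand-ins, not NePS classes): a model policy is consulted only once the warmstart phase is over, then a sampling policy if one is configured, and finally a uniform-random sample from the space with the fidelity dimension ignored.

```python
# Illustrative dispatch order only; the returned strings name the branch taken.
def dispatch(model_based, init_phase, sampling_policy):
    if model_based and not init_phase:
        return "model_policy.sample(...)"
    if sampling_policy is not None:
        return "sampling_policy.sample(...)"
    return "pipeline_space.sample(ignore_fidelity=True)"

print(dispatch(model_based=True, init_phase=True, sampling_policy=None))
# -> 'pipeline_space.sample(ignore_fidelity=True)'
```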

PFNSurrogate #

PFNSurrogate(*args, **kwargs)

Bases: FreezeThawModel

Special class to deal with PFN surrogate model and freeze-thaw acquisition.

Source code in neps/optimizers/multi_fidelity/mf_bo.py
def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.train_x = None
    self.train_y = None
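
For orientation, the `train_x`/`train_y` buffers initialized above hold the surrogate's training data once observations arrive. The layout below is an assumption about the kind of partial learning-curve data a freeze-thaw surrogate such as a PFN consumes, not the NePS data format:

```python
# Illustrative only: one (config, fidelity) input per observed step,
# with a matching performance value. Names and layout are assumptions.
train_x = [
    (0, 1), (0, 3),   # config 0 observed at fidelities 1 and 3
    (1, 1),           # config 1 frozen after fidelity 1
]
train_y = [0.42, 0.31, 0.55]  # matching validation losses
```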