Status

neps.status.status #

Functions to get the status of a run and save the status to CSV files.

get_run_summary_csv #

get_run_summary_csv(root_directory: str | Path) -> None

Create CSV files summarizing the run data.

PARAMETER DESCRIPTION
root_directory

The root directory of the NePS run.

TYPE: str | Path

Source code in neps/status/status.py
def get_run_summary_csv(root_directory: str | Path) -> None:
    """Create CSV files summarizing the run data.

    Args:
        root_directory: The root directory of the NePS run.
    """
    post_run_csv(root_directory=root_directory)
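
Example usage (a minimal sketch; "results/example_run" is a hypothetical root_directory that was passed to neps.run):

from neps.status.status import get_run_summary_csv

# Write CSV summaries of the run into its root directory.
# "results/example_run" stands in for the root_directory given to neps.run.
get_run_summary_csv("results/example_run")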

get_summary_dict #

get_summary_dict(
    root_directory: str | Path, *, add_details: bool = False
) -> dict[str, Any]

Create a dict that summarizes a run.

PARAMETER DESCRIPTION
root_directory

The root directory given to neps.run.

TYPE: str | Path

add_details

If true, add detailed dicts for previous_results, pending_configs, and pending_configs_free.

TYPE: bool DEFAULT: False

RETURNS DESCRIPTION
summary_dict

Information summarizing a run

TYPE: dict[str, Any]

Source code in neps/status/status.py
def get_summary_dict(
    root_directory: str | Path,
    *,
    add_details: bool = False,
) -> dict[str, Any]:
    """Create a dict that summarizes a run.

    Args:
        root_directory: The root directory given to neps.run.
        add_details: If true, add detailed dicts for previous_results, pending_configs,
            and pending_configs_free.

    Returns:
        summary_dict: Information summarizing a run
    """
    root_directory = Path(root_directory)
    shared_state = SharedState(root_directory)

    # NOTE: We don't lock the shared state since we are just reading and don't need to
    # make decisions based on the state
    trial_refs = shared_state.trial_refs()
    evaluated = [r.to_result() for r in trial_refs[Trial.State.COMPLETE]]
    pending = [r.load() for r in trial_refs[Trial.State.PENDING]]
    in_progress = [r.load() for r in trial_refs[Trial.State.IN_PROGRESS]]

    summary: dict[str, Any] = {}

    if add_details:
        summary["previous_results"] = {c.id: c for c in evaluated}
        summary["pending_configs"] = {c.id: c for c in in_progress + pending}
        summary["pending_configs_free"] = {c: id for c in pending}

    summary["num_evaluated_configs"] = len(evaluated)
    summary["num_pending_configs"] = len(in_progress) + len(pending)
    summary["num_pending_configs_with_worker"] = len(in_progress)

    summary["best_loss"] = float("inf")
    summary["best_config_id"] = None
    summary["best_config_metadata"] = None
    summary["best_config"] = None
    summary["num_error"] = 0
    for evaluation in evaluated:
        if evaluation.result == "error":
            summary["num_error"] += 1
        loss = _get_loss(evaluation.result, ignore_errors=True)
        if isinstance(loss, float) and loss < summary["best_loss"]:
            summary["best_loss"] = _get_loss(evaluation.result)
            summary["best_config"] = evaluation.config
            summary["best_config_id"] = evaluation.id
            summary["best_config_metadata"] = evaluation.metadata

    return summary
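
Example usage (a sketch of reading the summary programmatically; the run directory is a hypothetical placeholder):

from neps.status.status import get_summary_dict

# "results/example_run" stands in for the root_directory given to neps.run.
summary = get_summary_dict("results/example_run", add_details=True)

print("evaluated configs:", summary["num_evaluated_configs"])
print("pending configs:  ", summary["num_pending_configs"])
print("crashed configs:  ", summary["num_error"])
print("best loss:", summary["best_loss"], "for config id", summary["best_config_id"])

# With add_details=True, per-config dictionaries are also available:
for config_id, result in summary["previous_results"].items():
    ...  # inspect individual evaluated results here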

post_run_csv #

post_run_csv(root_directory: str | Path) -> None

Create CSV files summarizing the run data.

PARAMETER DESCRIPTION
root_directory

The root directory of the NePS run.

TYPE: str | Path

Source code in neps/status/status.py
def post_run_csv(root_directory: str | Path) -> None:
    """Create CSV files summarizing the run data.

    Args:
        root_directory: The root directory of the NePS run.
    """
    csv_config_data, csv_rundata, csv_locker = _initiate_summary_csv(root_directory)

    df_config_data, df_run_data = _get_dataframes_from_summary(
        root_directory,
        include_metadatas=True,
        include_results=True,
        include_configs=True,
    )

    _save_data_to_csv(
        csv_config_data,
        csv_rundata,
        csv_locker,
        df_config_data,
        df_run_data,
    )
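
Example usage (a sketch only; the exact names and locations of the generated CSV files are not documented here, so the snippet simply discovers any CSVs under the run directory):

from pathlib import Path

import pandas as pd

from neps.status.status import post_run_csv

root = Path("results/example_run")  # hypothetical root_directory given to neps.run
post_run_csv(root)

# Discover whatever CSV files were written and peek at their shapes.
for csv_path in sorted(root.rglob("*.csv")):
    print(csv_path, pd.read_csv(csv_path).shape)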

status #

status(
    root_directory: str | Path,
    *,
    best_losses: bool = False,
    best_configs: bool = False,
    all_configs: bool = False,
    print_summary: bool = True
) -> tuple[dict[str, ConfigResult], dict[str, SearchSpace]]

Print status information of a neps run and return results.

PARAMETER DESCRIPTION
root_directory

The root directory given to neps.run.

TYPE: str | Path

best_losses

If true, show the trajectory of the best loss across evaluations

TYPE: bool DEFAULT: False

best_configs

If true, show the trajectory of the best configs and their losses across evaluations

TYPE: bool DEFAULT: False

all_configs

If true, show all configs and their losses

TYPE: bool DEFAULT: False

print_summary

If true, print a summary of the current run state

TYPE: bool DEFAULT: True

RETURNS DESCRIPTION
previous_results

Already evaluated configurations and results.

pending_configs

Configs that have been sampled, but have not finished evaluating.

TYPE: tuple[dict[str, ConfigResult], dict[str, SearchSpace]]

Source code in neps/status/status.py
def status(
    root_directory: str | Path,
    *,
    best_losses: bool = False,
    best_configs: bool = False,
    all_configs: bool = False,
    print_summary: bool = True,
) -> tuple[dict[str, ConfigResult], dict[str, SearchSpace]]:
    """Print status information of a neps run and return results.

    Args:
        root_directory: The root directory given to neps.run.
        best_losses: If true, show the trajectory of the best loss across evaluations
        best_configs: If true, show the trajectory of the best configs and their losses
            across evaluations
        all_configs: If true, show all configs and their losses
        print_summary: If true, print a summary of the current run state

    Returns:
        previous_results: Already evaluated configurations and results.
        pending_configs: Configs that have been sampled, but have not finished evaluating
    """
    root_directory = Path(root_directory)
    summary = get_summary_dict(root_directory, add_details=True)

    if print_summary:
        print(f"#Evaluated configs: {summary['num_evaluated_configs']}")
        print(f"#Pending configs: {summary['num_pending_configs']}")
        print(
            f"#Pending configs with worker: {summary['num_pending_configs_with_worker']}",
        )

        print(f"#Crashed configs: {summary['num_error']}")

        if len(summary["previous_results"]) == 0:
            return summary["previous_results"], summary["pending_configs"]

        print()
        print(f"Best loss: {summary['best_loss']}")
        print(f"Best config id: {summary['best_config_id']}")
        print(f"Best config: {summary['best_config']}")

        if best_losses:
            print()
            print("Best loss across evaluations:")
            best_loss_trajectory = root_directory / "best_loss_trajectory.txt"
            print(best_loss_trajectory.read_text(encoding="utf-8"))

        if best_configs:
            print()
            print("Best configs and their losses across evaluations:")
            print(79 * "-")
            best_loss_config = root_directory / "best_loss_with_config_trajectory.txt"
            print(best_loss_config.read_text(encoding="utf-8"))

        if all_configs:
            print()
            print("All evaluated configs and their losses:")
            print(79 * "-")
            all_loss_config = root_directory / "all_losses_and_configs.txt"
            print(all_loss_config.read_text(encoding="utf-8"))

    return summary["previous_results"], summary["pending_configs"]