History
The History is used to keep a structured record of what occurred with Trials and their associated Reports.
Usage
from amltk.optimization import Trial, History, Metric
from amltk.store import PathBucket
loss = Metric("loss", minimize=True)
def target_function(trial: Trial) -> Trial.Report:
x = trial.config["x"]
y = trial.config["y"]
trial.store({"config.json": trial.config})
with trial.begin():
loss = x**2 - y
if trial.exception:
return trial.fail()
return trial.success(loss=loss)
# ... usually obtained from an optimizer
bucket = PathBucket("all-trial-results")
history = History()
for x, y in zip([1, 2, 3], [4, 5, 6]):
trial = Trial(name="some-unique-name", config={"x": x, "y": y}, bucket=bucket, metrics=[loss])
report = target_function(trial)
history.add(report)
print(history.df())
bucket.rmdir() # markdown-exec: hide
status trial_seed ... time:kind time:unit
name ...
some-unique-name success
You'll often need to perform some operations on a
History,
so we provide some utility functions here:
filter(key=...)
- Filters the history by some predicate, e.g. history.filter(lambda report: report.status == "success")
groupby(key=...)
- Groups the history by some key, e.g. history.groupby(lambda report: report.config["x"] < 5)
sortby(key=...)
- Sorts the history by some key, e.g. history.sortby(lambda report: report.time.end)
There are also some serialization capabilities built in, to allow you to store your reports and load them back in later:
df(...)
- Output a pd.DataFrame of all the information available.
from_df(...)
- Create a History from a pd.DataFrame.
You can also retrieve individual reports from the history by using their
name, e.g. history["some-unique-name"]
or iterate through
the history with for report in history: ...
.
class History
dataclass
#
Bases: RichRenderable
A history of trials.
This is a collection of reports from trials, where you can access
the reports by their trial name. It is unsorted in general, but
by using sortby()
you
can sort the history.
from amltk.optimization import Trial, History, Metric
metric = Metric("cost", minimize=True)
trials = [
Trial(name=f"trial_{i}", config={"x": i}, metrics=[metric])
for i in range(10)
]
history = History()
for trial in trials:
with trial.begin():
x = trial.config["x"]
report = trial.success(cost=x**2 - x*2 + 4)
history.add(report)
for report in history:
print(f"{report.name=}, {report}")
print(history.metrics)
print(history.df())
print(history.best())
report.name='trial_0', Trial.Report(trial=Trial(name='trial_0', config={'x': 0}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 4.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=4.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_1', Trial.Report(trial=Trial(name='trial_1', config={'x': 1}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 3.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=3.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_2', Trial.Report(trial=Trial(name='trial_2', config={'x': 2}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 4.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=4.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_3', Trial.Report(trial=Trial(name='trial_3', config={'x': 3}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 7.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=7.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_4', Trial.Report(trial=Trial(name='trial_4', config={'x': 4}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 12.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=12.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_5', Trial.Report(trial=Trial(name='trial_5', config={'x': 5}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 19.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=19.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_6', Trial.Report(trial=Trial(name='trial_6', config={'x': 6}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 28.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=28.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_7', Trial.Report(trial=Trial(name='trial_7', config={'x': 7}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 39.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=39.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_8', Trial.Report(trial=Trial(name='trial_8', config={'x': 8}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 52.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=52.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
report.name='trial_9', Trial.Report(trial=Trial(name='trial_9', config={'x': 9}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 67.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=67.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': Metric(name='cost', minimize=True, bounds=None)}
status trial_seed exception ... time:duration time:kind time:unit
name ...
trial_0 success <NA> NA ... 0.000037 wall seconds
trial_1 success <NA> NA ... 0.000024 wall seconds
trial_2 success <NA> NA ... 0.000021 wall seconds
trial_3 success <NA> NA ... 0.00003 wall seconds
trial_4 success <NA> NA ... 0.000021 wall seconds
trial_5 success <NA> NA ... 0.000021 wall seconds
trial_6 success <NA> NA ... 0.000021 wall seconds
trial_7 success <NA> NA ... 0.000021 wall seconds
trial_8 success <NA> NA ... 0.000021 wall seconds
trial_9 success <NA> NA ... 0.00002 wall seconds
[10 rows x 19 columns]
Trial.Report(trial=Trial(name='trial_1', config={'x': 1}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 3.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=3.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
ATTRIBUTE | DESCRIPTION |
---|---|
reports |
A mapping of trial names to reports. |
def from_reports(reports)
classmethod
#
Creates a history from reports.
PARAMETER | DESCRIPTION |
---|---|
reports |
An iterable of reports. |
RETURNS | DESCRIPTION |
---|---|
History
|
A history. |
Source code in src/amltk/optimization/history.py
def best(metric=None)
#
Returns the best report in the history.
PARAMETER | DESCRIPTION |
---|---|
metric |
The metric to sort by. If
TYPE:
|
RETURNS | DESCRIPTION |
---|---|
Report
|
The best report. |
Source code in src/amltk/optimization/history.py
def add(report)
#
Adds a report or reports to the history.
PARAMETER | DESCRIPTION |
---|---|
report |
A report or reports to add. |
Source code in src/amltk/optimization/history.py
def find(name)
#
def df(*, profiles=True, configs=True, summary=True, metrics=True, normalize_time=True)
#
Returns a pandas DataFrame of the history.
Each individual trial will be a row in the dataframe.
Prefixes
summary
: Entries will be prefixed with "summary:"
config
: Entries will be prefixed with "config:"
metrics
: Entries will be prefixed with "metrics:"
from amltk.optimization import Trial, History, Metric
metric = Metric("cost", minimize=True)
trials = [Trial(name=f"trial_{i}", config={"x": i}, metrics=[metric]) for i in range(10)]
history = History()
for trial in trials:
with trial.begin():
x = trial.config["x"]
report = trial.success(cost=x**2 - x*2 + 4)
history.add(report)
print(history.df())
status trial_seed exception ... time:duration time:kind time:unit
name ...
trial_0 success <NA> NA ... 0.000036 wall seconds
trial_1 success <NA> NA ... 0.000025 wall seconds
trial_2 success <NA> NA ... 0.000021 wall seconds
trial_3 success <NA> NA ... 0.000022 wall seconds
trial_4 success <NA> NA ... 0.000022 wall seconds
trial_5 success <NA> NA ... 0.000021 wall seconds
trial_6 success <NA> NA ... 0.000021 wall seconds
trial_7 success <NA> NA ... 0.00002 wall seconds
trial_8 success <NA> NA ... 0.00002 wall seconds
trial_9 success <NA> NA ... 0.00002 wall seconds
[10 rows x 19 columns]
PARAMETER | DESCRIPTION |
---|---|
profiles |
Whether to include the profiles.
TYPE:
|
configs |
Whether to include the configs.
TYPE:
|
summary |
Whether to include the summary.
TYPE:
|
metrics |
Whether to include the metrics.
TYPE:
|
normalize_time |
Whether to normalize the time to the first
report. If enabled, will normalize all time-related columns relative to the earliest report (exact behavior for non-boolean values not shown here — verify against the source). |
RETURNS | DESCRIPTION |
---|---|
DataFrame
|
A pandas DataFrame of the history. |
Source code in src/amltk/optimization/history.py
215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 |
|
def filter(key)
#
Filters the history by a predicate.
from amltk.optimization import Trial, History, Metric
metric = Metric("cost", minimize=True)
trials = [Trial(name=f"trial_{i}", config={"x": i}, metrics=[metric]) for i in range(10)]
history = History()
for trial in trials:
with trial.begin():
x = trial.config["x"]
report = trial.success(cost=x**2 - x*2 + 4)
history.add(report)
filtered_history = history.filter(lambda report: report.metrics["cost"] < 10)
for report in filtered_history:
cost = report.metrics["cost"]
print(f"{report.name}, {cost=}, {report}")
trial_0, cost=4.0, Trial.Report(trial=Trial(name='trial_0', config={'x': 0}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 4.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=4.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
trial_1, cost=3.0, Trial.Report(trial=Trial(name='trial_1', config={'x': 1}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 3.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=3.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
trial_2, cost=4.0, Trial.Report(trial=Trial(name='trial_2', config={'x': 2}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 4.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=4.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
trial_3, cost=7.0, Trial.Report(trial=Trial(name='trial_3', config={'x': 3}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 7.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=7.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
PARAMETER | DESCRIPTION |
---|---|
key |
A predicate to filter by. |
RETURNS | DESCRIPTION |
---|---|
History
|
A new history with the filtered reports. |
Source code in src/amltk/optimization/history.py
def groupby(key)
#
Groups the history by the values of a key.
from amltk.optimization import Trial, History, Metric
metric = Metric("cost", minimize=True)
trials = [Trial(name=f"trial_{i}", config={"x": i}, metrics=[metric]) for i in range(10)]
history = History()
for trial in trials:
with trial.begin():
x = trial.config["x"]
if x % 2 == 0:
report = trial.fail(cost=1_000)
else:
report = trial.success(cost=x**2 - x*2 + 4)
history.add(report)
for status, history in history.groupby("status").items():
print(f"{status=}, {len(history)=}")
You can pass a Callable
to group by any key you like:
from amltk.optimization import Trial, History, Metric
metric = Metric("cost", minimize=True)
trials = [Trial(name=f"trial_{i}", config={"x": i}, metrics=[metric]) for i in range(10)]
history = History()
for trial in trials:
with trial.begin():
x = trial.config["x"]
report = trial.fail(cost=x)
history.add(report)
for below_5, history in history.groupby(lambda r: r.metrics["cost"] < 5).items():
print(f"{below_5=}, {len(history)=}")
PARAMETER | DESCRIPTION |
---|---|
key |
A key to group by. If |
RETURNS | DESCRIPTION |
---|---|
dict[Hashable, History]
|
A mapping of keys to histories. |
Source code in src/amltk/optimization/history.py
def incumbents(key, *, sortby=lambda report: report.time.end, reverse=None, ffill=False)
#
Returns a trace of the incumbents, where only the report that is better than the previous best report is kept.
from amltk.optimization import Trial, History, Metric
metric = Metric("cost", minimize=True)
trials = [Trial(name=f"trial_{i}", config={"x": i}, metrics=[metric]) for i in range(10)]
history = History()
for trial in trials:
with trial.begin():
x = trial.config["x"]
report = trial.success(cost=x**2 - x*2 + 4)
history.add(report)
incumbents = (
history
.incumbents("cost", sortby=lambda r: r.time.end)
)
for report in incumbents:
print(f"{report.metrics=}, {report.config=}")
PARAMETER | DESCRIPTION |
---|---|
key |
The key to use. If given a str, it will use that as the
key to use in the metrics, defining if one report is better
than another. If given a |
sortby |
The key to sort by. If given a str, it will sort by
the value of that key in the
TYPE:
|
reverse |
Whether to sort in some given order. By
default (
TYPE:
|
ffill |
Whether to forward fill the incumbents. This means that if a report is not an incumbent, it will be replaced with the current best. This is useful if you want to visualize the incumbents over some x axis, where the you have a point at every place along the axis.
TYPE:
|
RETURNS | DESCRIPTION |
---|---|
list[Report]
|
The history of incumbents. |
Source code in src/amltk/optimization/history.py
def sortby(key, *, reverse=None)
#
Sorts the history by a key and returns a sorted History.
from amltk.optimization import Trial, History, Metric
metric = Metric("cost", minimize=True)
trials = [Trial(name=f"trial_{i}", config={"x": i}, metrics=[metric]) for i in range(10)]
history = History()
for trial in trials:
with trial.begin():
x = trial.config["x"]
report = trial.success(cost=x**2 - x*2 + 4)
history.add(report)
trace = (
history
.filter(lambda report: report.status == "success")
.sortby("cost")
)
for report in trace:
print(f"{report.metrics}, {report}")
{'cost': 3.0}, Trial.Report(trial=Trial(name='trial_1', config={'x': 1}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 3.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=3.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 4.0}, Trial.Report(trial=Trial(name='trial_0', config={'x': 0}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 4.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=4.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 4.0}, Trial.Report(trial=Trial(name='trial_2', config={'x': 2}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 4.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=4.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 7.0}, Trial.Report(trial=Trial(name='trial_3', config={'x': 3}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 7.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=7.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 12.0}, Trial.Report(trial=Trial(name='trial_4', config={'x': 4}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 12.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=12.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 19.0}, Trial.Report(trial=Trial(name='trial_5', config={'x': 5}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 19.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=19.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 28.0}, Trial.Report(trial=Trial(name='trial_6', config={'x': 6}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 28.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=28.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 39.0}, Trial.Report(trial=Trial(name='trial_7', config={'x': 7}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 39.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=39.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 52.0}, Trial.Report(trial=Trial(name='trial_8', config={'x': 8}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 52.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=52.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
{'cost': 67.0}, Trial.Report(trial=Trial(name='trial_9', config={'x': 9}, bucket=PathBucket(PosixPath('unknown-trial-bucket')), metrics=[Metric(name='cost', minimize=True, bounds=None)], seed=None, fidelities=None, summary={}, exception=None, storage=set(), extras={}), status=<Status.SUCCESS: 'success'>, metrics={'cost': 67.0}, metric_values=(Metric.Value(metric=Metric(name='cost', minimize=True, bounds=None), value=67.0),), metric_defs={'cost': Metric(name='cost', minimize=True, bounds=None)}, metric_names=('cost',))
PARAMETER | DESCRIPTION |
---|---|
key |
The key to sort by. If given a str, it will sort by
the value of that key in the
TYPE:
|
reverse |
Whether to sort in some given order. By
default (
TYPE:
|
RETURNS | DESCRIPTION |
---|---|
list[Report]
|
A sorted list of reports |