tabular
class TabularBenchmark(name, table, *, config_name, fidelity_name, result_keys, config_keys, remove_constants=False, space=None, seed=None, prior=None, perturb_prior=None)
Bases: Benchmark[CTabular, R, F]
PARAMETER | TYPE | DESCRIPTION
---|---|---
name | str | The name of this benchmark.
table | pd.DataFrame | The table to use for the benchmark.
config_name | str | The column in the table that contains the config id.
fidelity_name | str | The column in the table that contains the fidelity.
result_keys | Sequence[str] | The columns in the table that contain the results.
config_keys | Sequence[str] | The columns in the table that contain the config values.
remove_constants | bool | Whether to remove constant config columns from the data.
space | ConfigurationSpace \| None | The configuration space to use for the benchmark. If None, it will just be an empty space.
prior | str \| Path \| dict \| Configuration \| None | The prior to use for the benchmark. If None, no prior is used. If a string, it is treated as a prior specific to this benchmark if one can be found, otherwise it is assumed to be a Path. If a Path, the prior is loaded from that path. If a dict or Configuration, it is used directly.
perturb_prior | float \| None | If not None, the prior is perturbed by this amount. For numericals, this is interpreted as the standard deviation of a normal distribution, while for categoricals it is interpreted as the probability of swapping the value for a random one.
seed | int \| None | The seed to use for the benchmark.
Source code in src/mfpbench/tabular.py
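As a rough sketch of the table shape the constructor expects (the column names here are invented for illustration, not taken from the library): each row holds one (config, fidelity) pair, with the config id, the fidelity, the config values, and the results as columns.

```python
import pandas as pd

# Invented column names: "config_id" would be passed as config_name,
# "epoch" as fidelity_name, ["lr"] as config_keys, ["error"] as result_keys.
table = pd.DataFrame(
    {
        "config_id": ["c0", "c0", "c1", "c1"],  # one id per config
        "epoch": [1, 2, 1, 2],                  # the fidelity column
        "lr": [0.1, 0.1, 0.01, 0.01],           # config value columns
        "error": [0.50, 0.40, 0.60, 0.30],      # result columns
    }
)
```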
table: pd.DataFrame (attr)
The table of results used for this benchmark.

fidelity_name: str (attr)
The name of the fidelity used in this benchmark.

config_name: str (attr)
The column in the table that contains the config id. Will be set as the index of the table.

config_keys: Sequence[str] (attr)
The keys in the table that contain the config.

result_keys: Sequence[str] (attr)
The keys in the table that contain the results.
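A brief sketch of reading these attributes back, assuming `bench` is an already-constructed benchmark built from the toy table above:

```python
# The attributes expose the underlying data directly.
df = bench.table                                # the full results DataFrame
print(bench.config_name, bench.fidelity_name)   # e.g. "config_id", "epoch"
print(list(bench.config_keys), list(bench.result_keys))
```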
def query(config, at=None, *, argmax=None, argmin=None)

Submit a query and get a result.

Passing a raw config

If a mapping is passed (and not a Config object), we will attempt to look for id in the mapping to know which config to look up. If this fails, we will try to match the config to one of the configs in the benchmark. Prefer to pass the Config object directly if possible.

Override

This function overrides the default query() to allow for this config matching.
PARAMETER | TYPE | DESCRIPTION
---|---|---
config | CTabular \| Mapping[str, Any] | The query to use.
at | F \| None | The fidelity at which to query. Defaults to None, which means the maximum fidelity.
argmax | str \| None | Whether to return the argmax up to the point at.
argmin | str \| None | Whether to return the argmin up to the point at.

RETURNS | DESCRIPTION
---|---
R | The result of the query.
Source code in src/mfpbench/tabular.py
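A hedged sketch of the two ways to pass a config described above; `bench` is assumed to be an already-constructed benchmark and "c0" an id assumed to exist in its table:

```python
# Preferred: pass the Config object directly.
config = bench.sample(seed=0)
result = bench.query(config)        # at=None queries the maximum fidelity
result = bench.query(config, at=1)  # or query at a specific fidelity

# Also possible: a raw mapping. The "id" key is looked up first; failing
# that, the values are matched against the benchmark's known configs.
result = bench.query({"id": "c0"})
```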
def trajectory(config, *, frm=None, to=None, step=None)

Submit a query and get a trajectory of results.

Passing a raw config

If a mapping is passed (and not a Config object), we will attempt to look for id in the mapping to know which config to look up. If this fails, we will try to match the config to one of the configs in the benchmark. Prefer to pass the Config object directly if possible.

Override

This function overrides the default trajectory() to allow for this config matching.
PARAMETER | TYPE | DESCRIPTION
---|---|---
config | CTabular \| Mapping[str, Any] | The query to use.
frm | F \| None | Start of the curve. Defaults to the start of the fidelity range.
to | F \| None | End of the curve. Defaults to the end of the fidelity range.
step | F \| None | Step size. Defaults to the benchmark's default step.

RETURNS | DESCRIPTION
---|---
list[R] | The results of the query, one per fidelity step.
Source code in src/mfpbench/tabular.py
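A short sketch of retrieving a curve and a slice of it; the fidelity values (epochs 1..2 from the toy table earlier) are illustrative:

```python
# Full learning curve for a config, then a sub-range of it.
curve = bench.trajectory(config)                 # frm/to default to the full range
partial = bench.trajectory(config, frm=1, to=2)  # a slice of the curve
print(len(curve), len(partial))                  # one result R per fidelity step
```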
def sample(n=None, *, seed=None)

Sample a random possible config.

PARAMETER | TYPE | DESCRIPTION
---|---|---
n | int \| None | How many samples to take. None means just a single one, not in a list.
seed | int \| None | The seed to use for the sampling. Note: this is different from any seed passed to the construction of the benchmark.

RETURNS | DESCRIPTION
---|---
CTabular \| list[CTabular] | A possible Config to use: a single config if n is None, otherwise a list.
Source code in src/mfpbench/tabular.py
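A minimal usage sketch of the two return shapes described above:

```python
one = bench.sample(seed=0)        # n=None: a single CTabular config
many = bench.sample(n=5, seed=0)  # an int n: a list of 5 configs
assert not isinstance(one, list) and len(many) == 5
```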
class GenericTabularBenchmark(table, *, name=None, fidelity_name, config_name, result_keys, config_keys, result_mapping=None, remove_constants=False, space=None, seed=None, prior=None, perturb_prior=None)
Bases: TabularBenchmark[GenericTabularConfig, GenericTabularResult[GenericTabularConfig, F], F]
PARAMETER | TYPE | DESCRIPTION
---|---|---
table | pd.DataFrame | The table to use for the benchmark.
name | str \| None | The name of the benchmark. If None, a default name is used.
fidelity_name | str | The column in the table that contains the fidelity.
config_name | str | The column in the table that contains the config id.
result_keys | Sequence[str] | The columns in the table that contain the results.
config_keys | Sequence[str] | The columns in the table that contain the config values.
result_mapping | dict[str, str \| Callable] \| None | A mapping from the result keys to the table keys. If a string, it is used as the key in the table. If a callable, it is called with the table and its return value is used.
remove_constants | bool | Whether to remove constant config columns from the data.
space | ConfigurationSpace \| None | The configuration space to use for the benchmark. If None, it will just be an empty space.
seed | int \| None | The seed to use.
prior | str \| Path \| Mapping \| None | The prior to use for the benchmark. If None, no prior is used. If a str, the local location is checked first for a prior specific to this benchmark; otherwise it is assumed to be a Path. If a Path, the prior is loaded from that path. If a Mapping, it is used directly.
perturb_prior | float \| None | If not None, the prior is perturbed by this amount. For numericals, this is interpreted as the standard deviation of a normal distribution, while for categoricals it is interpreted as the probability of swapping the value for a random one.
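A hedged end-to-end sketch reusing the invented table from the first example. The result_mapping shows both documented forms (a plain table key and a callable applied to the table); the derived "score" key, and its inclusion in result_keys, are illustrative assumptions rather than behavior confirmed by these docs.

```python
from mfpbench.tabular import GenericTabularBenchmark

bench = GenericTabularBenchmark(
    table,                        # the invented DataFrame from the first sketch
    name="toy-tabular",           # illustrative name
    config_name="config_id",
    fidelity_name="epoch",
    config_keys=["lr"],
    result_keys=["error", "score"],
    result_mapping={
        "error": "error",                       # a string: used as a key in the table
        "score": lambda df: 1.0 - df["error"],  # a callable: called with the table
    },
    remove_constants=True,
    seed=42,
)
```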