3 changes: 1 addition & 2 deletions ax/benchmark/benchmark.py
@@ -396,10 +396,9 @@ def _update_benchmark_tracking_vars_in_place(
     # problems, because Ax's best-point functionality doesn't know
     # to predict at the target task or fidelity.
     if compute_best_params:
-        (best_params,) = method.get_best_parameters(
+        best_params = method.get_best_parameters(
             experiment=experiment,
             optimization_config=problem.optimization_config,
-            n_points=problem.n_best_points,
         )
         best_params_list.append(best_params)

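For illustration, the call-site change above reduces to dropping the single-element unpacking: get_best_parameters now hands back one parameterization (a mapping from parameter names to values) rather than a one-element list. A minimal sketch, assuming method, experiment, problem, compute_best_params, and best_params_list come from the surrounding benchmarking loop:

    # Sketch only; the surrounding objects are assumed from the benchmark loop above.
    if compute_best_params:
        best_params = method.get_best_parameters(
            experiment=experiment,
            optimization_config=problem.optimization_config,
        )
        # best_params is a single TParameterization, e.g. {"x1": 0.1, "x2": -2.3}
        best_params_list.append(best_params)
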
30 changes: 6 additions & 24 deletions ax/benchmark/benchmark_method.py
@@ -61,39 +61,23 @@ def get_best_parameters(
         self,
         experiment: Experiment,
         optimization_config: OptimizationConfig,
-        n_points: int,
-    ) -> list[TParameterization]:
+    ) -> TParameterization:
         """
-        Get ``n_points`` promising points. NOTE: Only SOO with n_points = 1 is
-        supported.
-
-        The expected use case is that these points will be evaluated against an
-        oracle for hypervolume (if multi-objective) or for the value of the best
-        parameter (if single-objective).
-
-        For multi-objective cases, ``n_points > 1`` is needed. For SOO, ``n_points > 1``
-        reflects setups where we can choose some points which will then be
-        evaluated noiselessly or at high fidelity and then use the best one.
+        Get the most promising point. NOTE: Only SOO is supported.

         Args:
             experiment: The experiment to get the data from. This should contain
                 values that would be observed in a realistic setting and not
                 contain oracle values.
             optimization_config: The ``optimization_config`` for the corresponding
                 ``BenchmarkProblem``.
-            n_points: The number of points to return.
         """
         if isinstance(optimization_config, MultiObjectiveOptimizationConfig):
             raise NotImplementedError(
                 "BenchmarkMethod.get_pareto_optimal_parameters is not currently "
                 "supported for multi-objective problems."
             )

-        if n_points != 1:
-            raise NotImplementedError(
-                f"Currently only n_points=1 is supported. Got {n_points=}."
-            )
         if len(experiment.trials) == 0:
             raise ValueError(
                 "Cannot identify a best point if experiment has no trials."
@@ -102,10 +86,9 @@ def get_best_parameters(
         def _get_first_parameterization_from_last_trial() -> TParameterization:
             return experiment.trials[max(experiment.trials)].arms[0].parameters

-        # SOO, n=1 case.
         # Note: This has the same effect as orchestrator.get_best_parameters
         if len(experiment.trials_by_status[TrialStatus.COMPLETED]) == 0:
-            return [_get_first_parameterization_from_last_trial()]
+            return _get_first_parameterization_from_last_trial()

         result = BestPointMixin._get_best_trial(
             experiment=experiment,
Expand All @@ -115,7 +98,6 @@ def _get_first_parameterization_from_last_trial() -> TParameterization:
if result is None:
# This can happen if no points are predicted to satisfy all outcome
# constraints.
params = _get_first_parameterization_from_last_trial()
else:
_, params, _ = none_throws(result)
return [params]
return _get_first_parameterization_from_last_trial()
_, params, _ = none_throws(result)
return params
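
To make the new contract concrete, here is a rough usage sketch. The method (a BenchmarkMethod) and experiment are assumed to come from a benchmark run with at least one trial, as in the tests further down; evaluate_with_oracle is a hypothetical stand-in for whatever noiseless scoring a benchmark applies to the recommended point.

    from ax.core.types import TParameterization
    from pyre_extensions import none_throws

    # Sketch only: `method` and `experiment` are assumed to already exist.
    best_params: TParameterization = method.get_best_parameters(
        experiment=experiment,
        optimization_config=none_throws(experiment.optimization_config),
    )
    # The return value is now a single parameterization (one dict), not a
    # one-element list, so it can be scored directly.
    oracle_value = evaluate_with_oracle(best_params)  # hypothetical helper
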
5 changes: 0 additions & 5 deletions ax/benchmark/benchmark_problem.py
@@ -91,8 +91,6 @@ class BenchmarkProblem(Base):
             default) or the ``inference_trace``. See ``BenchmarkResult`` for
             more information. Currently, this is only supported for
             single-objective problems.
-        n_best_points: Number of points for a best-point selector to recommend.
-            Currently, only ``n_best_points=1`` is supported.
         step_runtime_function: Optionally, a function that takes in ``params``
             (typically dictionaries mapping strings to ``TParamValue``s) and
             returns the runtime of an step. If ``step_runtime_function`` is
@@ -111,7 +109,6 @@ class BenchmarkProblem(Base):
     baseline_value: float
     search_space: SearchSpace = field(repr=False)
     report_inference_value_as_trace: bool = False
-    n_best_points: int = 1
     step_runtime_function: TBenchmarkStepRuntimeFunction | None = None
     target_fidelity_and_task: Mapping[str, TParamValue] = field(default_factory=dict)
     status_quo_params: Mapping[str, TParamValue] | None = None
Expand All @@ -121,8 +118,6 @@ class BenchmarkProblem(Base):

def __post_init__(self) -> None:
# Validate inputs
if self.n_best_points != 1:
raise NotImplementedError("Only `n_best_points=1` is currently supported.")
if self.report_inference_value_as_trace and self.is_moo:
raise NotImplementedError(
"Inference trace is not supported for MOO. Please set "
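With n_best_points removed from the dataclass, constructing a BenchmarkProblem simply omits that argument; the remaining __post_init__ validation above still rejects report_inference_value_as_trace=True for MOO. A minimal sketch, mirroring the constructor call from the test removed in test_benchmark_problem.py further down (optimization_config and test_function are assumed to be built as in that test):

    from ax.benchmark.benchmark_problem import BenchmarkProblem
    from ax.core.search_space import SearchSpace

    # Illustrative only; mirrors the removed test's constructor arguments,
    # minus the deleted `n_best_points` field.
    problem = BenchmarkProblem(
        name="foo",
        optimization_config=optimization_config,  # single-objective config
        num_trials=1,
        optimal_value=0.0,
        baseline_value=1.0,
        search_space=SearchSpace(parameters=[]),
        test_function=test_function,
        # n_best_points=1,  # no longer a field; passing it now raises TypeError
    )
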
4 changes: 1 addition & 3 deletions ax/benchmark/tests/methods/test_methods.py
@@ -155,10 +155,8 @@ def test_get_best_parameters(self) -> None:
             + "get_best_parameters_from_model_predictions_with_trial_index",
             wraps=get_best_parameters_from_model_predictions_with_trial_index,
         ) as mock_get_best_parameters_from_predictions:
-            best_params = method.get_best_parameters(
+            method.get_best_parameters(
                 experiment=experiment,
                 optimization_config=problem.optimization_config,
-                n_points=1,
             )
         mock_get_best_parameters_from_predictions.assert_called_once()
-        self.assertEqual(len(best_params), 1)
19 changes: 4 additions & 15 deletions ax/benchmark/tests/test_benchmark_method.py
@@ -62,22 +62,15 @@ def test_get_best_parameters(self) -> None:
             NotImplementedError, "not currently supported for multi-objective"
         ):
             method.get_best_parameters(
-                experiment=experiment, optimization_config=moo_config, n_points=1
+                experiment=experiment, optimization_config=moo_config
             )

         soo_config = get_soo_opt_config(outcome_names=["a"])
-        with self.subTest("Multiple points not supported"), self.assertRaisesRegex(
-            NotImplementedError, "only n_points=1"
-        ):
-            method.get_best_parameters(
-                experiment=experiment, optimization_config=soo_config, n_points=2
-            )
-
         with self.subTest("Empty experiment"), self.assertRaisesRegex(
             ValueError, "Cannot identify a best point if experiment has no trials"
         ):
             method.get_best_parameters(
-                experiment=experiment, optimization_config=soo_config, n_points=1
+                experiment=experiment, optimization_config=soo_config
             )

         with self.subTest("All constraints violated"):
Expand All @@ -86,12 +79,10 @@ def test_get_best_parameters(self) -> None:
constrained=True,
)
best_point = method.get_best_parameters(
n_points=1,
experiment=experiment,
optimization_config=none_throws(experiment.optimization_config),
)
self.assertEqual(len(best_point), 1)
self.assertEqual(best_point[0], experiment.trials[1].arms[0].parameters)
self.assertEqual(best_point, experiment.trials[1].arms[0].parameters)

with self.subTest("No completed trials"):
experiment = get_experiment_with_observations(observations=[])
Expand All @@ -100,9 +91,7 @@ def test_get_best_parameters(self) -> None:
trial = experiment.new_trial(generator_run=sobol_generator.gen(n=1))
trial.run()
best_point = method.get_best_parameters(
n_points=1,
experiment=experiment,
optimization_config=none_throws(experiment.optimization_config),
)
self.assertEqual(len(best_point), 1)
self.assertEqual(best_point[0], experiment.trials[2].arms[0].parameters)
self.assertEqual(best_point, experiment.trials[2].arms[0].parameters)
38 changes: 0 additions & 38 deletions ax/benchmark/tests/test_benchmark_problem.py
Expand Up @@ -46,44 +46,6 @@ def setUp(self) -> None:
self.maxDiff = None
super().setUp()

def test_inference_value_not_implemented(self) -> None:
objectives = [
Objective(metric=BenchmarkMetric(name, lower_is_better=True))
for name in ["Branin", "Currin"]
]
optimization_config = OptimizationConfig(objective=objectives[0])
test_function = BoTorchTestFunction(
botorch_problem=Branin(), outcome_names=["Branin"]
)
with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"):
BenchmarkProblem(
name="foo",
optimization_config=optimization_config,
num_trials=1,
optimal_value=0.0,
baseline_value=1.0,
search_space=SearchSpace(parameters=[]),
test_function=test_function,
n_best_points=2,
)

with self.assertRaisesRegex(
NotImplementedError, "Inference trace is not supported for MOO"
):
BenchmarkProblem(
name="foo",
optimization_config=MultiObjectiveOptimizationConfig(
objective=MultiObjective(objectives)
),
num_trials=1,
optimal_value=0.0,
search_space=SearchSpace(parameters=[]),
baseline_value=1.0,
test_function=test_function,
n_best_points=1,
report_inference_value_as_trace=True,
)

def test_mismatch_of_names_on_test_function_and_opt_config_raises(self) -> None:
objectives = [
Objective(metric=BenchmarkMetric(name, lower_is_better=True))