diff --git a/ax/benchmark/benchmark.py b/ax/benchmark/benchmark.py
index f58f1b08df2..0a71388b73c 100644
--- a/ax/benchmark/benchmark.py
+++ b/ax/benchmark/benchmark.py
@@ -396,10 +396,9 @@ def _update_benchmark_tracking_vars_in_place(
     # problems, because Ax's best-point functionality doesn't know
     # to predict at the target task or fidelity.
     if compute_best_params:
-        (best_params,) = method.get_best_parameters(
+        best_params = method.get_best_parameters(
             experiment=experiment,
             optimization_config=problem.optimization_config,
-            n_points=problem.n_best_points,
         )
         best_params_list.append(best_params)

diff --git a/ax/benchmark/benchmark_method.py b/ax/benchmark/benchmark_method.py
index 67447b9a650..2a9463ddbd7 100644
--- a/ax/benchmark/benchmark_method.py
+++ b/ax/benchmark/benchmark_method.py
@@ -61,20 +61,9 @@ def get_best_parameters(
         self,
         experiment: Experiment,
         optimization_config: OptimizationConfig,
-        n_points: int,
-    ) -> list[TParameterization]:
+    ) -> TParameterization:
         """
-        Get ``n_points`` promising points. NOTE: Only SOO with n_points = 1 is
-        supported.
-
-        The expected use case is that these points will be evaluated against an
-        oracle for hypervolume (if multi-objective) or for the value of the best
-        parameter (if single-objective).
-
-        For multi-objective cases, ``n_points > 1`` is needed. For SOO, ``n_points > 1``
-        reflects setups where we can choose some points which will then be
-        evaluated noiselessly or at high fidelity and then use the best one.
-
+        Get the most promising point. NOTE: Only SOO is supported.

         Args:
             experiment: The experiment to get the data from. This should contain
@@ -82,7 +71,6 @@ def get_best_parameters(
                 contain oracle values.
             optimization_config: The ``optimization_config`` for the corresponding
                 ``BenchmarkProblem``.
-            n_points: The number of points to return.
         """
         if isinstance(optimization_config, MultiObjectiveOptimizationConfig):
             raise NotImplementedError(
@@ -90,10 +78,6 @@
                 "supported for multi-objective problems."
             )

-        if n_points != 1:
-            raise NotImplementedError(
-                f"Currently only n_points=1 is supported. Got {n_points=}."
-            )
         if len(experiment.trials) == 0:
             raise ValueError(
                 "Cannot identify a best point if experiment has no trials."
@@ -102,10 +86,9 @@
         def _get_first_parameterization_from_last_trial() -> TParameterization:
             return experiment.trials[max(experiment.trials)].arms[0].parameters

-        # SOO, n=1 case.
         # Note: This has the same effect as orchestrator.get_best_parameters
         if len(experiment.trials_by_status[TrialStatus.COMPLETED]) == 0:
-            return [_get_first_parameterization_from_last_trial()]
+            return _get_first_parameterization_from_last_trial()

         result = BestPointMixin._get_best_trial(
             experiment=experiment,
@@ -115,7 +98,6 @@ def _get_first_parameterization_from_last_trial() -> TParameterization:
         if result is None:
             # This can happen if no points are predicted to satisfy all outcome
             # constraints.
-            params = _get_first_parameterization_from_last_trial()
-        else:
-            _, params, _ = none_throws(result)
-        return [params]
+            return _get_first_parameterization_from_last_trial()
+        _, params, _ = none_throws(result)
+        return params
diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index 557c99472c9..784a9c97cf1 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -91,8 +91,6 @@ class BenchmarkProblem(Base):
             default) or the ``inference_trace``. See ``BenchmarkResult`` for more
             information. Currently, this is only supported for single-objective
             problems.
-        n_best_points: Number of points for a best-point selector to recommend.
-            Currently, only ``n_best_points=1`` is supported.
         step_runtime_function: Optionally, a function that takes in ``params``
             (typically dictionaries mapping strings to ``TParamValue``s) and returns
             the runtime of an step. If ``step_runtime_function`` is
@@ -111,7 +109,6 @@ class BenchmarkProblem(Base):
     baseline_value: float
     search_space: SearchSpace = field(repr=False)
     report_inference_value_as_trace: bool = False
-    n_best_points: int = 1
     step_runtime_function: TBenchmarkStepRuntimeFunction | None = None
     target_fidelity_and_task: Mapping[str, TParamValue] = field(default_factory=dict)
     status_quo_params: Mapping[str, TParamValue] | None = None
@@ -121,8 +118,6 @@ class BenchmarkProblem(Base):

     def __post_init__(self) -> None:
         # Validate inputs
-        if self.n_best_points != 1:
-            raise NotImplementedError("Only `n_best_points=1` is currently supported.")
         if self.report_inference_value_as_trace and self.is_moo:
             raise NotImplementedError(
                 "Inference trace is not supported for MOO. Please set "
diff --git a/ax/benchmark/tests/methods/test_methods.py b/ax/benchmark/tests/methods/test_methods.py
index 3fefe9f0ec1..2e0fe033c51 100644
--- a/ax/benchmark/tests/methods/test_methods.py
+++ b/ax/benchmark/tests/methods/test_methods.py
@@ -155,10 +155,8 @@ def test_get_best_parameters(self) -> None:
             + "get_best_parameters_from_model_predictions_with_trial_index",
             wraps=get_best_parameters_from_model_predictions_with_trial_index,
         ) as mock_get_best_parameters_from_predictions:
-            best_params = method.get_best_parameters(
+            method.get_best_parameters(
                 experiment=experiment,
                 optimization_config=problem.optimization_config,
-                n_points=1,
             )
         mock_get_best_parameters_from_predictions.assert_called_once()
-        self.assertEqual(len(best_params), 1)
diff --git a/ax/benchmark/tests/test_benchmark_method.py b/ax/benchmark/tests/test_benchmark_method.py
index 9b56f90cd86..74b579c8df8 100644
--- a/ax/benchmark/tests/test_benchmark_method.py
+++ b/ax/benchmark/tests/test_benchmark_method.py
@@ -62,22 +62,15 @@ def test_get_best_parameters(self) -> None:
             NotImplementedError, "not currently supported for multi-objective"
         ):
             method.get_best_parameters(
-                experiment=experiment, optimization_config=moo_config, n_points=1
+                experiment=experiment, optimization_config=moo_config
             )

         soo_config = get_soo_opt_config(outcome_names=["a"])

-        with self.subTest("Multiple points not supported"), self.assertRaisesRegex(
-            NotImplementedError, "only n_points=1"
-        ):
-            method.get_best_parameters(
-                experiment=experiment, optimization_config=soo_config, n_points=2
-            )
-
         with self.subTest("Empty experiment"), self.assertRaisesRegex(
             ValueError, "Cannot identify a best point if experiment has no trials"
         ):
             method.get_best_parameters(
-                experiment=experiment, optimization_config=soo_config, n_points=1
+                experiment=experiment, optimization_config=soo_config
             )

@@ -86,12 +79,10 @@ def test_get_best_parameters(self) -> None:
         with self.subTest("All constraints violated"):
             experiment = get_experiment_with_observations(
                 observations=[[1], [2]],
                 constrained=True,
             )
             best_point = method.get_best_parameters(
-                n_points=1,
                 experiment=experiment,
                 optimization_config=none_throws(experiment.optimization_config),
             )
-            self.assertEqual(len(best_point), 1)
-            self.assertEqual(best_point[0], experiment.trials[1].arms[0].parameters)
+            self.assertEqual(best_point, experiment.trials[1].arms[0].parameters)

         with self.subTest("No completed trials"):
             experiment = get_experiment_with_observations(observations=[])
@@ -100,9 +91,7 @@ def test_get_best_parameters(self) -> None:
             trial = experiment.new_trial(generator_run=sobol_generator.gen(n=1))
             trial.run()
             best_point = method.get_best_parameters(
-                n_points=1,
                 experiment=experiment,
                 optimization_config=none_throws(experiment.optimization_config),
             )
-            self.assertEqual(len(best_point), 1)
-            self.assertEqual(best_point[0], experiment.trials[2].arms[0].parameters)
+            self.assertEqual(best_point, experiment.trials[2].arms[0].parameters)
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index d51fe8c47f0..e1fb7ffd713 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -46,44 +46,6 @@ def setUp(self) -> None:
         self.maxDiff = None
         super().setUp()

-    def test_inference_value_not_implemented(self) -> None:
-        objectives = [
-            Objective(metric=BenchmarkMetric(name, lower_is_better=True))
-            for name in ["Branin", "Currin"]
-        ]
-        optimization_config = OptimizationConfig(objective=objectives[0])
-        test_function = BoTorchTestFunction(
-            botorch_problem=Branin(), outcome_names=["Branin"]
-        )
-        with self.assertRaisesRegex(NotImplementedError, "Only `n_best_points=1`"):
-            BenchmarkProblem(
-                name="foo",
-                optimization_config=optimization_config,
-                num_trials=1,
-                optimal_value=0.0,
-                baseline_value=1.0,
-                search_space=SearchSpace(parameters=[]),
-                test_function=test_function,
-                n_best_points=2,
-            )
-
-        with self.assertRaisesRegex(
-            NotImplementedError, "Inference trace is not supported for MOO"
-        ):
-            BenchmarkProblem(
-                name="foo",
-                optimization_config=MultiObjectiveOptimizationConfig(
-                    objective=MultiObjective(objectives)
-                ),
-                num_trials=1,
-                optimal_value=0.0,
-                search_space=SearchSpace(parameters=[]),
-                baseline_value=1.0,
-                test_function=test_function,
-                n_best_points=1,
-                report_inference_value_as_trace=True,
-            )
-
     def test_mismatch_of_names_on_test_function_and_opt_config_raises(self) -> None:
         objectives = [
             Objective(metric=BenchmarkMetric(name, lower_is_better=True))
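
Usage sketch of the call-site change, mirroring the hunk in ``ax/benchmark/benchmark.py`` above. This is an illustrative fragment, not part of the patch: it assumes ``method``, ``experiment``, and ``problem`` are a ``BenchmarkMethod``, an ``Experiment``, and a ``BenchmarkProblem`` already in scope, as they are in the benchmark loop. ``get_best_parameters`` now returns a single ``TParameterization`` rather than a one-element list and no longer accepts ``n_points``.

    # Before this change: a length-1 list was returned and had to be unpacked,
    # and the (always-1) n_points argument had to be passed explicitly.
    # (best_params,) = method.get_best_parameters(
    #     experiment=experiment,
    #     optimization_config=problem.optimization_config,
    #     n_points=problem.n_best_points,
    # )

    # After this change: the single best parameterization is returned directly.
    best_params = method.get_best_parameters(
        experiment=experiment,
        optimization_config=problem.optimization_config,
    )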