From 36917bcc7e82ee48c31d923788cd215e1f0c54fa Mon Sep 17 00:00:00 2001
From: Elizabeth Santorella
Date: Wed, 28 Aug 2024 10:59:48 -0700
Subject: [PATCH] Get rid of MultiObjectiveBenchmarkProblem (#2721)

Summary:
Pull Request resolved: https://github.com/facebook/Ax/pull/2721

Context: The only purpose of `MultiObjectiveBenchmarkProblem` is as a type
annotation indicating whether its `optimization_config` is a
`MultiObjectiveOptimizationConfig`. This creates more trouble than it's worth;
we can simply check whether the optimization config is multi-objective rather
than checking whether the problem is multi-objective. The same goes for having
single-objective and multi-objective surrogate problems.

With this change, the only two `BenchmarkProblem` classes are
`BenchmarkProblem` and `SurrogateBenchmarkProblem`; the latter can be removed
in a future PR.

This diff:
* Removes `MultiObjectiveBenchmarkProblem` and replaces references to it with
  `BenchmarkProblem`
* Consolidates `SurrogateBenchmarkProblemBase`, `SOOSurrogateBenchmarkProblem`,
  and `MOOSurrogateBenchmarkProblem` into `SurrogateBenchmarkProblem` and
  replaces references
* Removes some branching logic

Reviewed By: Balandat

Differential Revision: D61869477

fbshipit-source-id: f5a7948d264616bf4a8e7352cf851d68ce303dc6
---
 ax/benchmark/benchmark_problem.py             | 19 +---------
 ax/benchmark/problems/surrogate.py            | 37 +++++--------------
 .../tests/problems/test_surrogate_problems.py |  2 +-
 ax/benchmark/tests/test_benchmark_problem.py  |  6 ++-
 ax/storage/json_store/registry.py             |  7 +---
 ax/utils/testing/benchmark_stubs.py           | 31 +++++++---------
 6 files changed, 33 insertions(+), 69 deletions(-)

diff --git a/ax/benchmark/benchmark_problem.py b/ax/benchmark/benchmark_problem.py
index fbf55355f75..491bba1640d 100644
--- a/ax/benchmark/benchmark_problem.py
+++ b/ax/benchmark/benchmark_problem.py
@@ -256,28 +256,13 @@ def create_single_objective_problem_from_botorch(
     )
 
 
-@dataclass(kw_only=True, repr=True)
-class MultiObjectiveBenchmarkProblem(BenchmarkProblem):
-    """
-    A `BenchmarkProblem` that supports multiple objectives.
-
-    For multi-objective problems, `optimal_value` indicates the maximum
-    hypervolume attainable with the objective thresholds provided on the
-    `optimization_config`.
-
-    For argument descriptions, see `BenchmarkProblem`.
-    """
-
-    optimization_config: MultiObjectiveOptimizationConfig
-
-
 def create_multi_objective_problem_from_botorch(
     test_problem_class: type[MultiObjectiveTestProblem],
     test_problem_kwargs: dict[str, Any],
     # TODO: Figure out whether we should use `lower_is_better` here.
     num_trials: int,
     observe_noise_sd: bool = False,
-) -> MultiObjectiveBenchmarkProblem:
+) -> BenchmarkProblem:
     """Create a BenchmarkProblem from a BoTorch BaseTestProblem using specialized
     Metrics and Runners. The test problem's result will be computed on the Runner
     once per trial and each Metric will retrieve its own result by index.
@@ -337,7 +322,7 @@ def create_multi_objective_problem_from_botorch(
         ],
     )
 
-    return MultiObjectiveBenchmarkProblem(
+    return BenchmarkProblem(
         name=name,
         search_space=get_continuous_search_space(test_problem._bounds),
         optimization_config=optimization_config,
diff --git a/ax/benchmark/problems/surrogate.py b/ax/benchmark/problems/surrogate.py
index 4d9864540c1..6c188540801 100644
--- a/ax/benchmark/problems/surrogate.py
+++ b/ax/benchmark/problems/surrogate.py
@@ -5,48 +5,29 @@
 # pyre-strict
 
 """
-Benchmark problems based on surrogates.
+Benchmark problem based on a surrogate.
 
-These problems might appear to function identically to their non-surrogate
-counterparts, `BenchmarkProblem` and `MultiObjectiveBenchmarkProblem`, aside
-from the restriction that their runners are of type `SurrogateRunner`. However,
-they are treated specially within JSON storage because surrogates cannot be
-easily serialized.
+This problem class might appear to function identically to its non-surrogate
+counterpart, `BenchmarkProblem`, aside from the restriction that its runner is
+of type `SurrogateRunner`. However, it is treated specially within JSON storage
+because surrogates cannot be easily serialized.
 """
 
 from dataclasses import dataclass, field
 
 from ax.benchmark.benchmark_problem import BenchmarkProblem
 from ax.benchmark.runners.surrogate import SurrogateRunner
-from ax.core.optimization_config import MultiObjectiveOptimizationConfig
 
 
 @dataclass(kw_only=True)
-class SurrogateBenchmarkProblemBase(BenchmarkProblem):
+class SurrogateBenchmarkProblem(BenchmarkProblem):
     """
-    Base class for SOOSurrogateBenchmarkProblem and MOOSurrogateBenchmarkProblem.
+    Benchmark problem whose `runner` is a `SurrogateRunner`.
 
-    Its `runner` is a `SurrogateRunner`, which allows for the surrogate to be
-    constructed lazily and datasets to be downloaded lazily.
+    `SurrogateRunner` allows for the surrogate to be constructed lazily and for
+    datasets to be downloaded lazily.
 
     For argument descriptions, see `BenchmarkProblem`.
     """
 
     runner: SurrogateRunner = field(repr=False)
-
-
-class SOOSurrogateBenchmarkProblem(SurrogateBenchmarkProblemBase):
-    pass
-
-
-@dataclass(kw_only=True)
-class MOOSurrogateBenchmarkProblem(SurrogateBenchmarkProblemBase):
-    """
-    Has the same attributes/properties as a `MultiObjectiveBenchmarkProblem`,
-    but its `runner` is a `SurrogateRunner`, which allows for the surrogate to be
-    constructed lazily and datasets to be downloaded lazily.
-
-    For argument descriptions, see `BenchmarkProblem`.
-    """
-
-    optimization_config: MultiObjectiveOptimizationConfig
diff --git a/ax/benchmark/tests/problems/test_surrogate_problems.py b/ax/benchmark/tests/problems/test_surrogate_problems.py
index e901d88b87c..0d1f941943c 100644
--- a/ax/benchmark/tests/problems/test_surrogate_problems.py
+++ b/ax/benchmark/tests/problems/test_surrogate_problems.py
@@ -31,7 +31,7 @@ def test_repr(self) -> None:
         sbp = get_soo_surrogate()
 
         expected_repr = (
-            "SOOSurrogateBenchmarkProblem(name='test', "
+            "SurrogateBenchmarkProblem(name='test', "
             "optimization_config=OptimizationConfig(objective=Objective(metric_name="
             '"branin", '
             "minimize=True), "
diff --git a/ax/benchmark/tests/test_benchmark_problem.py b/ax/benchmark/tests/test_benchmark_problem.py
index 39297acb63a..5c6fca66a53 100644
--- a/ax/benchmark/tests/test_benchmark_problem.py
+++ b/ax/benchmark/tests/test_benchmark_problem.py
@@ -14,6 +14,7 @@
     create_single_objective_problem_from_botorch,
 )
 from ax.benchmark.runners.botorch_test import BotorchTestProblemRunner
+from ax.core.optimization_config import MultiObjectiveOptimizationConfig
 from ax.core.types import ComparisonOp
 from ax.utils.common.testutils import TestCase
 from ax.utils.common.typeutils import checked_cast
@@ -28,6 +29,7 @@
     Cosine8,
 )
 from hypothesis import given, strategies as st
+from pyre_extensions import assert_is_instance
 
 
 class TestBenchmarkProblem(TestCase):
@@ -198,7 +200,9 @@ def test_moo_from_botorch(self) -> None:
 
         # Test hypervolume
         self.assertEqual(branin_currin_problem.optimal_value, test_problem._max_hv)
-        opt_config = branin_currin_problem.optimization_config
+        opt_config = assert_is_instance(
+            branin_currin_problem.optimization_config, MultiObjectiveOptimizationConfig
+        )
         reference_point = [
             threshold.bound for threshold in opt_config.objective_thresholds
         ]
diff --git a/ax/storage/json_store/registry.py b/ax/storage/json_store/registry.py
index fdfb742ef55..001d8f2a52e 100644
--- a/ax/storage/json_store/registry.py
+++ b/ax/storage/json_store/registry.py
@@ -12,10 +12,7 @@
 import torch
 from ax.benchmark.benchmark_method import BenchmarkMethod
 from ax.benchmark.benchmark_metric import BenchmarkMetric
-from ax.benchmark.benchmark_problem import (
-    BenchmarkProblem,
-    MultiObjectiveBenchmarkProblem,
-)
+from ax.benchmark.benchmark_problem import BenchmarkProblem
 from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
 from ax.benchmark.problems.hpo.torchvision import PyTorchCNNTorchvisionParamBasedProblem
 from ax.benchmark.runners.botorch_test import (
@@ -338,7 +335,7 @@
     "ModelRegistryBase": ModelRegistryBase,
     "ModelSpec": ModelSpec,
     "MultiObjective": MultiObjective,
-    "MultiObjectiveBenchmarkProblem": MultiObjectiveBenchmarkProblem,
+    "MultiObjectiveBenchmarkProblem": BenchmarkProblem,  # backward compatibility
     "MultiObjectiveOptimizationConfig": MultiObjectiveOptimizationConfig,
     "MultiTypeExperiment": MultiTypeExperiment,
     "NegativeBraninMetric": NegativeBraninMetric,
diff --git a/ax/utils/testing/benchmark_stubs.py b/ax/utils/testing/benchmark_stubs.py
index da71fcf7f74..4cbe94c5eee 100644
--- a/ax/utils/testing/benchmark_stubs.py
+++ b/ax/utils/testing/benchmark_stubs.py
@@ -16,13 +16,9 @@
     BenchmarkProblem,
     create_multi_objective_problem_from_botorch,
     create_single_objective_problem_from_botorch,
-    MultiObjectiveBenchmarkProblem,
 )
 from ax.benchmark.benchmark_result import AggregatedBenchmarkResult, BenchmarkResult
-from ax.benchmark.problems.surrogate import (
-    MOOSurrogateBenchmarkProblem,
-    SOOSurrogateBenchmarkProblem,
-)
+from ax.benchmark.problems.surrogate import SurrogateBenchmarkProblem
 from ax.benchmark.runners.botorch_test import ParamBasedTestProblem
 from ax.benchmark.runners.surrogate import SurrogateRunner
 from ax.core.experiment import Experiment
@@ -65,10 +61,12 @@ def get_single_objective_benchmark_problem(
 
 
 def get_multi_objective_benchmark_problem(
-    observe_noise_sd: bool = False, num_trials: int = 4
-) -> MultiObjectiveBenchmarkProblem:
+    observe_noise_sd: bool = False,
+    num_trials: int = 4,
+    test_problem_class: type[BraninCurrin] = BraninCurrin,
+) -> BenchmarkProblem:
     return create_multi_objective_problem_from_botorch(
-        test_problem_class=BraninCurrin,
+        test_problem_class=test_problem_class,
         test_problem_kwargs={},
         num_trials=num_trials,
         observe_noise_sd=observe_noise_sd,
@@ -77,12 +75,11 @@ def get_multi_objective_benchmark_problem(
 
 def get_constrained_multi_objective_benchmark_problem(
     observe_noise_sd: bool = False, num_trials: int = 4
-) -> MultiObjectiveBenchmarkProblem:
-    return create_multi_objective_problem_from_botorch(
-        test_problem_class=ConstrainedBraninCurrin,
-        test_problem_kwargs={},
-        num_trials=num_trials,
+) -> BenchmarkProblem:
+    return get_multi_objective_benchmark_problem(
         observe_noise_sd=observe_noise_sd,
+        num_trials=num_trials,
+        test_problem_class=ConstrainedBraninCurrin,
     )
 
 
@@ -99,7 +96,7 @@ def get_sobol_benchmark_method() -> BenchmarkMethod:
     )
 
 
-def get_soo_surrogate() -> SOOSurrogateBenchmarkProblem:
+def get_soo_surrogate() -> SurrogateBenchmarkProblem:
     experiment = get_branin_experiment(with_completed_trial=True)
     surrogate = TorchModelBridge(
         experiment=experiment,
@@ -123,7 +120,7 @@ def get_soo_surrogate() -> SOOSurrogateBenchmarkProblem:
     )
     optimization_config = OptimizationConfig(objective=objective)
 
-    return SOOSurrogateBenchmarkProblem(
+    return SurrogateBenchmarkProblem(
         name="test",
         search_space=experiment.search_space,
         optimization_config=optimization_config,
@@ -134,7 +131,7 @@ def get_soo_surrogate() -> SOOSurrogateBenchmarkProblem:
     )
 
 
-def get_moo_surrogate() -> MOOSurrogateBenchmarkProblem:
+def get_moo_surrogate() -> SurrogateBenchmarkProblem:
     experiment = get_branin_experiment_with_multi_objective(with_completed_trial=True)
     surrogate = TorchModelBridge(
         experiment=experiment,
@@ -171,7 +168,7 @@ def get_moo_surrogate() -> MOOSurrogateBenchmarkProblem:
             ],
         )
     )
-    return MOOSurrogateBenchmarkProblem(
+    return SurrogateBenchmarkProblem(
         name="test",
         search_space=experiment.search_space,
         optimization_config=optimization_config,
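
As a minimal sketch of the check this refactor relies on (not part of the commit; `problem_is_moo` is a hypothetical helper name, and the imports mirror those used in the diff above), the class-based test that `MultiObjectiveBenchmarkProblem` used to enable reduces to an isinstance check on the optimization config:

from ax.benchmark.benchmark_problem import BenchmarkProblem
from ax.core.optimization_config import MultiObjectiveOptimizationConfig


def problem_is_moo(problem: BenchmarkProblem) -> bool:
    # Hypothetical helper: with MultiObjectiveBenchmarkProblem removed,
    # multi-objective status is read from the problem's optimization_config
    # rather than from the problem's class.
    return isinstance(problem.optimization_config, MultiObjectiveOptimizationConfig)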