From f7227a27d174f46f84ddf90763fcf4412cb41a0c Mon Sep 17 00:00:00 2001 From: Miles Olson Date: Thu, 13 Feb 2025 11:00:17 -0800 Subject: [PATCH] Plot progressions for timeseries-like objectives (#3354) Summary: Pull Request resolved: https://github.com/facebook/Ax/pull/3354 Updated choose_analyses as titled. Reviewed By: saitcakmak Differential Revision: D69537664 fbshipit-source-id: 94e65ce07f400699cdca17c7d14e172d33262182 --- ax/analysis/utils.py | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/ax/analysis/utils.py b/ax/analysis/utils.py index 9e7ae628097..f22383db8c1 100644 --- a/ax/analysis/utils.py +++ b/ax/analysis/utils.py @@ -11,6 +11,7 @@ from ax.analysis.plotly.cross_validation import CrossValidationPlot from ax.analysis.plotly.interaction import InteractionPlot from ax.analysis.plotly.parallel_coordinates import ParallelCoordinatesPlot +from ax.analysis.plotly.progression import ProgressionPlot from ax.analysis.plotly.scatter import ScatterPlot from ax.analysis.summary import Summary from ax.core.experiment import Experiment @@ -22,9 +23,12 @@ def choose_analyses(experiment: Experiment) -> list[Analysis]: Choose a default set of Analyses to compute based on the current state of the Experiment. """ + # If there is no optimization config choose to plot just the Summary. if (optimization_config := experiment.optimization_config) is None: - return [] + return [Summary()] + # In the multi-objective case plot Scatters showing the Pareto frontier for each + # pair of objectives and Interactions for each objective. 
if isinstance(optimization_config.objective, MultiObjective) or isinstance( optimization_config.objective, ScalarizedObjective ): @@ -44,7 +48,8 @@ def choose_analyses(experiment: Experiment) -> list[Analysis]: InteractionPlot(metric_name=name) for name in optimization_config.objective.metric_names ] - + # In the single-objective case plot ParallelCoordinates and up to six ScatterPlots + # for the objective versus other metrics. else: objective_name = optimization_config.objective.metric.name # ParallelCoorindates and leave-one-out cross validation @@ -70,9 +75,27 @@ def choose_analyses(experiment: Experiment) -> list[Analysis]: interactions = [InteractionPlot(metric_name=objective_name)] + # If any number of objectives are timeseries-like plot their progression. + data = experiment.lookup_data() + progressions = [ + ProgressionPlot(metric_name=metric) + for metric in optimization_config.metrics + # Only include the progression plot if the metric is timeseries-like, i.e. the + # true_df has more rows than the condensed df. + if sum(data.df["metric_name"] == metric) != sum(data.true_df["metric_name"] == metric) + ] + # Leave-one-out cross validation for each objective and outcome constraint cv_plots = [ CrossValidationPlot(metric_name=name) for name in optimization_config.metrics ] - return [*objective_plots, *other_scatters, *interactions, *cv_plots, Summary()] + return [ *objective_plots, *other_scatters, *progressions, *interactions, *cv_plots, Summary(), ]