Skip to content

Commit

Permalink
wip modifications to get analysis to render in notebooks, see if anything breaks if we update analysis_Base, Temporary Commit at 2/25/2025, 1:16:09 PM
Browse files Browse the repository at this point in the history

Summary: ....

Differential Revision: D69725960
  • Loading branch information
Mia Garrard authored and facebook-github-bot committed Feb 27, 2025
1 parent 2339fb0 commit d42927a
Showing 1 changed file with 41 additions and 47 deletions.
88 changes: 41 additions & 47 deletions ax/service/utils/analysis_base.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,24 +4,21 @@
# LICENSE file in the root directory of this source tree.

# pyre-strict
import traceback
from collections.abc import Iterable

import pandas as pd

from ax.analysis.analysis import (
Analysis,
AnalysisCard,
AnalysisCardCategory,
AnalysisCardLevel,
AnalysisE,
display_cards,
)
from ax.analysis.markdown.markdown_analysis import (
markdown_analysis_card_from_analysis_e,
)
from ax.analysis.markdown.markdown_analysis import MarkdownAnalysisCard
from ax.analysis.plotly.parallel_coordinates import ParallelCoordinatesPlot
from ax.core.experiment import Experiment
from ax.generation_strategy.generation_strategy import GenerationStrategy
from ax.service.utils.with_db_settings_base import WithDBSettingsBase
from pyre_extensions import assert_is_instance


class AnalysisBase(WithDBSettingsBase):
Expand All @@ -37,7 +34,9 @@ class AnalysisBase(WithDBSettingsBase):
# is never initialized
generation_strategy: GenerationStrategy

def _choose_analyses(self) -> list[Analysis]:
def _choose_analyses(
self, plot_categories: list[AnalysisCardCategory] | None = None
) -> list[Analysis]:
"""
Choose Analyses to compute based on the Experiment, GenerationStrategy, etc.
"""
Expand All @@ -46,20 +45,36 @@ def _choose_analyses(self) -> list[Analysis]:
return [ParallelCoordinatesPlot()]

def compute_analyses(
self, analyses: Iterable[Analysis] | None = None
self,
analyses: Iterable[Analysis] | None = None,
display: bool = True,
) -> list[AnalysisCard]:
"""
Compute Analyses for the Experiment and GenerationStrategy associated with this
Scheduler instance and save them to the DB if possible. If an Analysis fails to
compute (e.g. due to a missing metric), it will be skipped and a warning will
be logged.
Compute AnalysisCards (data about the optimization for end-user consumption)
using the Experiment and GenerationStrategy. If no analyses are provided use
some heuristic to determine which analyses to run. If some analyses fail, log
failure and continue to compute the rest.
Note that the Analysis class is NOT part of the API and its methods are subject
to change incompatibly between minor versions. Users are encouraged to use the
provided analyses or leave this argument as None to use the default analyses.
Saves to database on completion if storage_config is present.
Args:
analyses: Analyses to compute. If None, the Scheduler will choose a set of
Analyses to compute based on the Experiment and GenerationStrategy.
analyses: A list of Analysis classes to run. If None Ax will choose which
analyses to run based on the state of the experiment.
display: Whether to display the AnalysisCards if executed in an interactive
environment (e.g. Jupyter). Defaults to True. If not in an interactive
environment this setting has no effect.
Returns:
A list of AnalysisCards.
"""

analyses = analyses if analyses is not None else self._choose_analyses()

# Compute Analyses one by one and accumulate Results holding either the
# AnalysisCard or an Exception and some metadata
results = [
analysis.compute_result(
experiment=self.experiment,
Expand All @@ -68,40 +83,19 @@ def compute_analyses(
for analysis in analyses
]

# TODO Accumulate Es into their own card, perhaps via unwrap_or_else
cards = [result.unwrap() for result in results if result.is_ok()]

for result in results:
if result.is_err():
e = assert_is_instance(
result.err,
AnalysisE,
)
traceback_str = "".join(
traceback.format_exception(
type(result.err.exception),
e.exception,
e.exception.__traceback__,
)
)
cards.append(
MarkdownAnalysisCard(
name=e.analysis.name,
# It would be better if we could reliably compute the title
# without risking another error
title=f"{e.analysis.name} Error",
subtitle=f"An error occurred while computing {e.analysis}",
attributes=e.analysis.attributes,
blob=traceback_str,
df=pd.DataFrame(),
level=AnalysisCardLevel.DEBUG,
category=AnalysisCardCategory.ERROR,
)
)
# Turn Exceptions into MarkdownAnalysisCards with the traceback as the message
cards = [
result.unwrap_or_else(markdown_analysis_card_from_analysis_e)
for result in results
]

# Display the AnalysisCards if requested and if the user is in a notebook
if display:
display_cards(cards=cards)

# Save the AnalysisCards to the database if possible
self._save_analysis_cards_to_db_if_possible(
analysis_cards=cards,
experiment=self.experiment,
experiment=self.experiment, analysis_cards=cards
)

return cards

0 comments on commit d42927a

Please sign in to comment.