Merge pull request #273 from xopt-org/documentation
Documentation
roussel-ryan authored Feb 10, 2025
2 parents 66a1299 + 1163a65 commit 33d1a83
Showing 29 changed files with 2,513 additions and 472 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -128,3 +128,6 @@ private_tests/*
# Don't include JupyterLab language server (LSP) files:
/.virtual_documents
*.csv

# don't include vscode settings
/.vscode
14 changes: 14 additions & 0 deletions docs/api/generators/bayesian.md
@@ -1,4 +1,18 @@
::: xopt.generators.bayesian.bayesian_generator.BayesianGenerator
::: xopt.generators.bayesian.bayesian_exploration.BayesianExplorationGenerator
::: xopt.generators.bayesian.expected_improvement.ExpectedImprovementGenerator
::: xopt.generators.bayesian.expected_improvement.TDExpectedImprovementGenerator
::: xopt.generators.bayesian.mobo.MOBOGenerator
::: xopt.generators.bayesian.upper_confidence_bound.UpperConfidenceBoundGenerator
::: xopt.generators.bayesian.upper_confidence_bound.TDUpperConfidenceBoundGenerator
::: xopt.generators.bayesian.multi_fidelity.MultiFidelityGenerator
::: xopt.generators.bayesian.turbo.TurboController
- get_trust_region
- update_trust_region
- get_data_in_trust_region
- update_state
- reset
::: xopt.generators.bayesian.turbo.OptimizeTurboController
- minimize
::: xopt.generators.bayesian.turbo.SafetyTurboController
::: xopt.generators.bayesian.turbo.EntropyTurboController
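These identifiers are rendered by mkdocstrings into the Bayesian generator API pages. As a rough orientation for readers, a minimal optimization loop with one of the listed generators might look like the sketch below (the sine objective, bounds, and iteration counts are illustrative assumptions, not part of this change):

```python
import math

import numpy as np

from xopt import Xopt
from xopt.evaluator import Evaluator
from xopt.generators.bayesian.upper_confidence_bound import UpperConfidenceBoundGenerator
from xopt.vocs import VOCS

# Illustrative one-variable minimization problem.
vocs = VOCS(variables={"x": [0.0, 2 * math.pi]}, objectives={"f": "MINIMIZE"})

def sin_objective(inputs: dict) -> dict:
    return {"f": float(np.sin(inputs["x"]))}

X = Xopt(
    vocs=vocs,
    evaluator=Evaluator(function=sin_objective),
    generator=UpperConfidenceBoundGenerator(vocs=vocs),
)
X.random_evaluate(3)      # seed the Gaussian process model with random samples
for _ in range(10):
    X.step()              # generate a candidate, evaluate it, update the model
print(X.data)
```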
3 changes: 1 addition & 2 deletions mkdocs.yml
@@ -46,8 +46,6 @@ nav:
- Other:
- Extremum seeking: examples/es/extremum_seeking.ipynb
- RCDS: examples/rcds/rcds.ipynb


- API:
- Xopt: api/xopt.md
- Vocs: api/vocs.md
@@ -110,6 +108,7 @@ plugins:

- mkdocs-jupyter:
include_source: True
execute: True

- mkdocstrings:
default_handler: python
3 changes: 3 additions & 0 deletions xopt/base.py
@@ -124,6 +124,9 @@ class Xopt(XoptBaseModel):
@model_validator(mode="before")
@classmethod
def validate_model(cls, data: Any):
"""
Validate the Xopt model by checking the generator and evaluator.
"""
if isinstance(data, dict):
# validate vocs
if isinstance(data["vocs"], dict):
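The validator above turns dict-style (e.g. YAML-loaded) configurations into the corresponding model objects. Sketched below, with a hypothetical import path for the evaluator function and assuming the usual Xopt configuration schema:

```python
from xopt import Xopt

# Hypothetical configuration; the before-mode validator converts the nested
# vocs (and, analogously, generator/evaluator) dicts into model instances.
config = {
    "vocs": {
        "variables": {"x": [0.0, 1.0]},
        "objectives": {"f": "MINIMIZE"},
    },
    "generator": {"name": "random"},
    "evaluator": {"function": "my_package.objectives.evaluate"},  # hypothetical path
}
X = Xopt(**config)
```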
217 changes: 185 additions & 32 deletions xopt/evaluator.py
@@ -38,7 +38,7 @@ class Evaluator(XoptBaseModel):
NormalExecutor or any instantiated Executor object
vectorized : bool, default=False
If true, lists of evaluation points will be sent to the evaluator
function to be processed in parallel instead of evaluated seperately via
function to be processed in parallel instead of evaluated separately via
mapping.
"""

@@ -51,7 +51,20 @@ def validate_all(cls, values):
model_config = ConfigDict(arbitrary_types_allowed=True)

@model_validator(mode="before")
def validate_all(cls, values):
def validate_all(cls, values: Dict) -> Dict:
"""
Validate all inputs before initializing the Evaluator.
Parameters
----------
values : dict
The input values to validate.
Returns
-------
dict
The validated input values.
"""
f = get_function(values["function"])
kwargs = values.get("function_kwargs", {})
kwargs = {**get_function_defaults(f), **kwargs}
@@ -62,31 +75,36 @@ def validate_all(cls, values):

executor = values.pop("executor", None)
if not executor:
if max_workers > 1:
executor = ProcessPoolExecutor(max_workers=max_workers)
else:
executor = DummyExecutor()
executor = (
ProcessPoolExecutor(max_workers=max_workers)
if max_workers > 1
else DummyExecutor()
)

# Cast as a NormalExecutor
values["executor"] = NormalExecutor[type(executor)](executor=executor)
values["max_workers"] = max_workers

return values
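In effect (a sketch with a hypothetical objective function): passing no executor selects the synchronous DummyExecutor when max_workers is 1 and a ProcessPoolExecutor otherwise.

```python
from xopt.evaluator import Evaluator

def quadratic(inputs: dict) -> dict:  # hypothetical objective
    return {"f": inputs["x"] ** 2}

serial_ev = Evaluator(function=quadratic)                    # DummyExecutor
parallel_ev = Evaluator(function=quadratic, max_workers=4)   # ProcessPoolExecutor
```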

def evaluate(self, input: Dict, **kwargs):
def evaluate(self, input: Dict, **kwargs) -> Dict:
"""
Evaluate a single input dict using Evaluator.function with
Evaluator.function_kwargs.
Further kwargs are passed to the function.
Inputs:
inputs: dict of inputs to be evaluated
**kwargs: additional kwargs to pass to the function
Returns:
function(input, **function_kwargs_updated)
Parameters
----------
input : dict
The input dictionary to evaluate.
**kwargs : dict
Additional keyword arguments to pass to the function.
Returns
-------
dict
The evaluation result.
"""
return self.safe_function(input, **{**self.function_kwargs, **kwargs})
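For example (hypothetical objective), a single-point evaluation might look like:

```python
from xopt.evaluator import Evaluator

def quadratic(inputs: dict) -> dict:  # hypothetical objective
    return {"f": inputs["x"] ** 2}

ev = Evaluator(function=quadratic)
out = ev.evaluate({"x": 2.0})
print(out)  # function outputs plus bookkeeping fields such as xopt_error
```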

@@ -99,7 +117,19 @@ def evaluate_data(
Dict[str, float],
],
) -> pd.DataFrame:
"""evaluate dataframe of inputs"""
"""
Evaluate a dataframe of inputs.
Parameters
----------
input_data : Union[pd.DataFrame, List[Dict[str, float]], Dict[str, List[float]], Dict[str, float]]
The input data to evaluate.
Returns
-------
pd.DataFrame
The evaluation results.
"""
if self.vectorized:
output_data = self.safe_function(input_data, **self.function_kwargs)
else:
@@ -117,7 +147,7 @@ def evaluate_data(
kwargs = [self.function_kwargs] * len(inputs)

output_data = self.executor.map(
safe_function1_for_map,
safe_function_for_map,
funcs,
inputs,
kwargs,
@@ -127,36 +157,61 @@ def evaluate_data(
[input_data, DataFrame(output_data, index=input_data.index)], axis=1
)

def safe_function(self, *args, **kwargs):
def safe_function(self, *args, **kwargs) -> Dict:
"""
Safely call the function, handling exceptions.
        Note that this bound method should not be submitted to an executor directly; use ``submit`` instead.
Parameters
----------
*args : tuple
Positional arguments to pass to the function.
**kwargs : dict
Keyword arguments to pass to the function.
Returns
-------
dict
The safe function outputs.
"""
return safe_function(self.function, *args, **kwargs)

def submit(self, input: Dict):
"""submit a single input to the executor
def submit(self, input: Dict) -> Future:
"""
Submit a single input to the executor.
Parameters
----------
input : dict
The input dictionary to submit.
Returns
-------
Future : Future object
Future
The Future object representing the submitted task.
"""
if not isinstance(input, dict):
raise ValueError("input must be a dictionary")
# return self.executor.submit(self.function, input, **self.function_kwargs)
# Must call a function outside of the classs
# Must call a function outside of the class
# See: https://stackoverflow.com/questions/44144584/typeerror-cant-pickle-thread-lock-objects
return self.executor.submit(
safe_function, self.function, input, **self.function_kwargs
)
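A brief usage sketch (hypothetical objective); the returned object is a standard concurrent.futures.Future:

```python
from xopt.evaluator import Evaluator

def quadratic(inputs: dict) -> dict:  # hypothetical objective
    return {"f": inputs["x"] ** 2}

ev = Evaluator(function=quadratic)
future = ev.submit({"x": 3.0})
print(future.result())  # blocks until the evaluation has completed
```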

def submit_data(self, input_data: pd.DataFrame):
"""submit dataframe of inputs to executor"""
def submit_data(self, input_data: pd.DataFrame) -> List[Future]:
"""
Submit a dataframe of inputs to the executor.
Parameters
----------
input_data : pd.DataFrame
The input data to submit.
Returns
-------
List[Future]
A list of Future objects representing the submitted tasks.
"""
input_data = pd.DataFrame(input_data) # cast to dataframe for consistency

if self.vectorized:
@@ -172,21 +227,62 @@ def submit_data(self, input_data: pd.DataFrame):
return futures
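Correspondingly, a batch submission can be gathered once the futures complete (hypothetical objective):

```python
import pandas as pd

from xopt.evaluator import Evaluator

def quadratic(inputs: dict) -> dict:  # hypothetical objective
    return {"f": inputs["x"] ** 2}

ev = Evaluator(function=quadratic)
futures = ev.submit_data(pd.DataFrame({"x": [0.1, 0.2, 0.3]}))
outputs = [f.result() for f in futures]
```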


def safe_function1_for_map(function, inputs, kwargs):
def safe_function_for_map(function: Callable, inputs: Dict, kwargs: Dict) -> Dict:
"""
Safely call the function, handling exceptions.
Parameters
----------
function : Callable
The function to call.
inputs : dict
The input dictionary to pass to the function.
kwargs : dict
The keyword arguments to pass to the function.
Returns
-------
dict
The safe function outputs.
"""
return safe_function(function, inputs, **kwargs)


def safe_function(function, *args, **kwargs):
def safe_function(function: Callable, *args, **kwargs) -> Dict:
"""
Safely call the function, handling exceptions.
Parameters
----------
function : Callable
The function to call.
*args : tuple
Positional arguments to pass to the function.
**kwargs : dict
Keyword arguments to pass to the function.
Returns
-------
dict
The safe function outputs.
"""
safe_outputs = safe_call(function, *args, **kwargs)
return process_safe_outputs(safe_outputs)


def process_safe_outputs(outputs: Dict):
def process_safe_outputs(outputs: Dict) -> Dict:
"""
Process the outputs of safe_call, flattening the output.
Parameters
----------
outputs : dict
The outputs of safe_call.
Returns
-------
dict
The processed outputs.
"""
o = {}
error = False
@@ -214,17 +310,25 @@ def process_safe_outputs(outputs: Dict):
def validate_outputs(outputs: DataFrame):
"""
Looks for Xopt errors in the outputs and raises XoptError if found.
"""
Parameters
----------
outputs : DataFrame
The outputs to validate.
Raises
------
XoptError
If any Xopt errors are found in the outputs.
"""
# Handles dicts or dataframes
if not np.any(outputs["xopt_error"]):
return

if "xopt_non_dict_result" in outputs:
result = outputs["xopt_non_dict_result"]
raise XoptError(
"Xopt evaluator returned a non-dict result, type is: "
f"{type(result)}, result is: {result}"
f"Xopt evaluator returned a non-dict result, type is: {type(result)}, result is: {result}"
)
else:
error_string = "Xopt evaluator caught exception(s):\n\n"
@@ -248,10 +352,51 @@ def __init__(self):
self._shutdown = False
self._shutdownLock = Lock()

def map(self, fn, *iterables, timeout=None, chunksize=1):
def map(self, fn: Callable, *iterables, timeout: float = None, chunksize: int = 1):
"""
Map the function to the iterables.
Parameters
----------
fn : Callable
The function to map.
*iterables : tuple
The iterables to map the function to.
timeout : float, optional
The timeout for the map operation. Defaults to None.
chunksize : int, optional
The chunk size for the map operation. Defaults to 1.
Returns
-------
map
The map object.
"""
return map(fn, *iterables)

def submit(self, fn, *args, **kwargs):
def submit(self, fn: Callable, *args, **kwargs) -> Future:
"""
Submit a function to the executor.
Parameters
----------
fn : Callable
The function to submit.
*args : tuple
The positional arguments to pass to the function.
**kwargs : dict
The keyword arguments to pass to the function.
Returns
-------
Future
The Future object representing the submitted task.
Raises
------
RuntimeError
If the executor has been shut down.
"""
with self._shutdownLock:
if self._shutdown:
raise RuntimeError("cannot schedule new futures after shutdown")
@@ -266,6 +411,14 @@ def submit(self, fn, *args, **kwargs):

return f
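For orientation, a sketch of the expected behavior: the DummyExecutor runs the callable immediately in the calling thread and wraps the result in an already-completed Future.

```python
from xopt.evaluator import DummyExecutor

ex = DummyExecutor()
fut = ex.submit(pow, 2, 10)         # executed synchronously
print(fut.result())                 # 1024, available immediately
print(list(ex.map(abs, [-3, 4])))   # [3, 4]; map defers to the builtin map
ex.shutdown()
```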

def shutdown(self, wait=True):
def shutdown(self, wait: bool = True):
"""
Shut down the executor.
Parameters
----------
wait : bool, optional
Whether to wait for the executor to shut down. Defaults to True.
"""
with self._shutdownLock:
self._shutdown = True
