#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import warnings
from collections import defaultdict
from copy import deepcopy
from functools import partial
from logging import Logger
from typing import (
Callable,
Dict,
Iterator,
List,
MutableMapping,
Optional,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import numpy as np
import torch
from ax.core.base_trial import TrialStatus
from ax.core.batch_trial import BatchTrial
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.objective import MultiObjective, Objective, ScalarizedObjective
from ax.core.observation import Observation, ObservationData, ObservationFeatures
from ax.core.optimization_config import (
MultiObjectiveOptimizationConfig,
OptimizationConfig,
TRefPoint,
)
from ax.core.outcome_constraint import (
ComparisonOp,
OutcomeConstraint,
ScalarizedOutcomeConstraint,
)
from ax.core.parameter import ChoiceParameter, Parameter, ParameterType, RangeParameter
from ax.core.parameter_constraint import ParameterConstraint
from ax.core.risk_measures import RiskMeasure
from ax.core.search_space import (
RobustSearchSpace,
RobustSearchSpaceDigest,
SearchSpace,
SearchSpaceDigest,
)
from ax.core.trial import Trial
from ax.core.types import TBounds, TCandidateMetadata
from ax.exceptions.core import UnsupportedError, UserInputError
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.derelativize import Derelativize
from ax.models.torch.botorch_moo_defaults import pareto_frontier_evaluator
from ax.models.torch.frontier_utils import (
get_weighted_mc_objective_and_objective_thresholds,
)
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import (
checked_cast_optional,
checked_cast_to_tuple,
not_none,
)
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
IndependentCVaR,
IndependentVaR,
MARS,
MultiOutputExpectation,
MVaR,
)
from botorch.acquisition.risk_measures import (
CVaR,
Expectation,
RiskMeasureMCObjective,
VaR,
WorstCase,
)
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from torch import Tensor
logger: Logger = get_logger(__name__)
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401 # pragma: no cover
"""A mapping of risk measure names to the corresponding classes.
NOTE: This can be extended with user-defined risk measure classes by
importing the dictionary and adding the new risk measure class as
`RISK_MEASURE_NAME_TO_CLASS["my_risk_measure"] = MyRiskMeasure`.
An example of this is found in `tests/test_risk_measure`.
"""
RISK_MEASURE_NAME_TO_CLASS: Dict[str, Type[RiskMeasureMCObjective]] = {
"Expectation": Expectation,
"CVaR": CVaR,
"MARS": MARS,
"MVaR": MVaR,
"IndependentCVaR": IndependentCVaR,
"IndependentVaR": IndependentVaR,
"MultiOutputExpectation": MultiOutputExpectation,
"VaR": VaR,
"WorstCase": WorstCase,
}
def check_has_multi_objective_and_data(
experiment: Experiment,
data: Data,
optimization_config: Optional[OptimizationConfig] = None,
) -> None:
"""Raise an error if not using a `MultiObjective` or if the data is empty."""
optimization_config = not_none(
optimization_config or experiment.optimization_config
)
if not isinstance(optimization_config.objective, MultiObjective):
raise ValueError("Multi-objective optimization requires multiple objectives.")
if data.df.empty:
raise ValueError("MultiObjectiveOptimization requires non-empty data.")
def get_fixed_features(
fixed_features: ObservationFeatures, param_names: List[str]
) -> Optional[Dict[int, float]]:
"""Reformat a set of fixed_features."""
fixed_features_dict = {}
for p_name, val in fixed_features.parameters.items():
# These all need to be floats at this point.
# pyre-ignore[6]: All float here.
val_ = float(val)
fixed_features_dict[param_names.index(p_name)] = val_
fixed_features_dict = fixed_features_dict if len(fixed_features_dict) > 0 else None
return fixed_features_dict
def pending_observations_as_array_list(
pending_observations: Dict[str, List[ObservationFeatures]],
outcome_names: List[str],
param_names: List[str],
) -> Optional[List[np.ndarray]]:
"""Re-format pending observations.
Args:
        pending_observations: Mapping from metric names to lists of pending
            observation features for that metric.
        outcome_names: List of outcome names.
        param_names: List of fitted param names.
Returns:
Filtered pending observations data, by outcome and param names.
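
    Example (illustrative sketch; the inputs below are assumed placeholder
    values)::

        pending = {"metric_a": [ObservationFeatures(parameters={"x": 0.5})]}
        arrays = pending_observations_as_array_list(
            pending_observations=pending,
            outcome_names=["metric_a", "metric_b"],
            param_names=["x"],
        )
        # `arrays` has one entry per outcome; outcomes with no pending
        # observations map to an empty array.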
"""
if len(pending_observations) == 0:
return None
pending = [np.array([]) for _ in outcome_names]
for metric_name, po_list in pending_observations.items():
        # Some metrics attached to the experiment may not be included in the
        # pending features for a given model: for example, the model may be
        # fit to initial data that is missing some of the experiment's
        # metrics, or the model may simply not be fit for some of them.
        # Metrics that appear in pending_observations (drawn from the
        # experiment) but not in outcome_names (the metrics expected by the
        # model) are therefore filtered out.
if metric_name not in outcome_names:
continue
pending[outcome_names.index(metric_name)] = np.array(
[[po.parameters[p] for p in param_names] for po in po_list]
)
return pending
def parse_observation_features(
X: np.ndarray,
param_names: List[str],
candidate_metadata: Optional[List[TCandidateMetadata]] = None,
) -> List[ObservationFeatures]:
"""Re-format raw model-generated candidates into ObservationFeatures.
Args:
param_names: List of param names.
X: Raw np.ndarray of candidate values.
candidate_metadata: Model's metadata for candidates it produced.
Returns:
List of candidates, represented as ObservationFeatures.
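
    Example (illustrative sketch; the array values are assumed)::

        X = np.array([[0.1, 0.2], [0.3, 0.4]])
        obs_feats = parse_observation_features(X=X, param_names=["x1", "x2"])
        # obs_feats[0].parameters -> {"x1": 0.1, "x2": 0.2}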
"""
if candidate_metadata and len(candidate_metadata) != len(X):
raise ValueError( # pragma: no cover
"Observations metadata list provided is not of "
"the same size as the number of candidates."
)
observation_features = []
for i, x in enumerate(X):
observation_features.append(
ObservationFeatures(
parameters=dict(zip(param_names, x)),
metadata=candidate_metadata[i] if candidate_metadata else None,
)
)
return observation_features
def get_pending_observation_features(
experiment: Experiment, include_failed_as_pending: bool = False
) -> Optional[Dict[str, List[ObservationFeatures]]]:
"""Computes a list of pending observation features (corresponding to arms that
have been generated and deployed in the course of the experiment, but have not
been completed with data or to arms that have been abandoned or belong to
abandoned trials).
NOTE: Pending observation features are passed to the model to
instruct it to not generate the same points again.
Args:
experiment: Experiment, pending features on which we seek to compute.
include_failed_as_pending: Whether to include failed trials as pending
(for example, to avoid the model suggesting them again).
Returns:
An optional mapping from metric names to a list of observation features,
pending for that metric (i.e. do not have evaluation data for that metric).
If there are no pending features for any of the metrics, return is None.
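
    Example (illustrative sketch; assumes an existing ``experiment`` with
    attached trials and metrics)::

        pending = get_pending_observation_features(experiment)
        if pending is not None:
            # Mapping from each metric name to the observation features still
            # pending for it; pass to the model so the same points are not
            # suggested again.
            ...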
"""
pending_features = {}
    # Note that this assumes that if a metric appears in fetched data, the trial is
    # not pending for that metric. This works when only the most recent data
    # matters, but logic may need to be added to also check previously attached
    # data objects.
for trial_index, trial in experiment.trials.items():
dat = trial.lookup_data()
for metric_name in experiment.metrics:
if metric_name not in pending_features:
pending_features[metric_name] = []
include_since_failed = include_failed_as_pending and trial.status.is_failed
if isinstance(trial, BatchTrial):
if trial.status.is_abandoned or (
(trial.status.is_deployed or include_since_failed)
and metric_name not in dat.df.metric_name.values
and trial.arms is not None
):
for arm in trial.arms:
not_none(pending_features.get(metric_name)).append(
ObservationFeatures.from_arm(
arm=arm,
trial_index=np.int64(trial_index),
metadata=trial._get_candidate_metadata(
arm_name=arm.name
),
)
)
abandoned_arms = trial.abandoned_arms
for abandoned_arm in abandoned_arms:
not_none(pending_features.get(metric_name)).append(
ObservationFeatures.from_arm(
arm=abandoned_arm,
trial_index=np.int64(trial_index),
metadata=trial._get_candidate_metadata(
arm_name=abandoned_arm.name
),
)
)
if isinstance(trial, Trial):
if trial.status.is_abandoned or (
(trial.status.is_deployed or include_since_failed)
and metric_name not in dat.df.metric_name.values
and trial.arm is not None
):
not_none(pending_features.get(metric_name)).append(
ObservationFeatures.from_arm(
arm=not_none(trial.arm),
trial_index=np.int64(trial_index),
metadata=trial._get_candidate_metadata(
arm_name=not_none(trial.arm).name
),
)
)
return pending_features if any(x for x in pending_features.values()) else None
def get_pending_observation_features_based_on_trial_status(
experiment: Experiment,
) -> Optional[Dict[str, List[ObservationFeatures]]]:
"""A faster analogue of ``get_pending_observation_features`` that makes
assumptions about trials in experiment in order to speed up extraction
of pending points.
Assumptions:
    * All arms in all trials in ``STAGED``, ``RUNNING`` and ``ABANDONED`` statuses
are to be considered pending for all outcomes.
* All arms in all trials in other statuses are to be considered not pending for
all outcomes.
This entails:
* No actual data-fetching for trials to determine whether arms in them are pending
for specific outcomes.
* Even if data is present for some outcomes in ``RUNNING`` trials, their arms will
still be considered pending for those outcomes.
NOTE: This function should not be used to extract pending features in field
experiments, where arms in running trials should not be considered pending if
there is data for those arms.
Args:
experiment: Experiment, pending features on which we seek to compute.
Returns:
An optional mapping from metric names to a list of observation features,
pending for that metric (i.e. do not have evaluation data for that metric).
If there are no pending features for any of the metrics, return is None.
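
    Example (illustrative sketch; assumes an existing ``experiment``)::

        pending = get_pending_observation_features_based_on_trial_status(experiment)
        # All arms in STAGED, RUNNING and ABANDONED trials are treated as
        # pending for every metric, without fetching any data.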
"""
pending_features = defaultdict(list)
for status in [TrialStatus.STAGED, TrialStatus.RUNNING, TrialStatus.ABANDONED]:
for trial in experiment.trials_by_status[status]:
for metric_name in experiment.metrics:
pending_features[metric_name].extend(
ObservationFeatures.from_arm(
arm=arm,
trial_index=np.int64(trial.index),
metadata=trial._get_candidate_metadata(arm_name=arm.name),
)
for arm in trial.arms
)
return dict(pending_features) if any(x for x in pending_features.values()) else None
def get_pareto_frontier_and_configs(
modelbridge: modelbridge_module.torch.TorchModelBridge,
observation_features: List[ObservationFeatures],
observation_data: Optional[List[ObservationData]] = None,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
arm_names: Optional[List[Optional[str]]] = None,
use_model_predictions: bool = True,
transform_outcomes_and_configs: Optional[bool] = None,
) -> Tuple[List[Observation], Tensor, Tensor, Optional[Tensor]]:
"""Helper that applies transforms and calls ``frontier_evaluator``.
Returns the ``frontier_evaluator`` configs in addition to the Pareto
observations.
Args:
modelbridge: ``Modelbridge`` used to predict metrics outcomes.
observation_features: Observation features to consider for the Pareto
frontier.
        observation_data: Data used to compute the Pareto front; required and
            used only if ``use_model_predictions`` is ``False``.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
specified in ``optimization_config``, if necessary.
optimization_config: Multi-objective optimization config.
arm_names: Arm names for each observation in ``observation_features``.
use_model_predictions: If ``True``, will use model predictions at
``observation_features`` to compute Pareto front. If ``False``,
will use ``observation_data`` directly to compute Pareto front, ignoring
``observation_features``.
        transform_outcomes_and_configs: Deprecated; must be ``False`` if provided.
            Previously, if ``True``, the optimization config, observation features,
            and observation data would be transformed before calling
            ``frontier_evaluator``, and all of the above untransformed before
            returning the observations.
Returns: Four-item tuple of:
        - frontier_observations: Observations of points on the Pareto frontier,
- f: n x m tensor representation of the Pareto frontier values where n is the
length of frontier_observations and m is the number of metrics,
- obj_w: m tensor of objective weights,
- obj_t: m tensor of objective thresholds corresponding to Y, or None if no
objective thresholds used.
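
    Example (illustrative sketch; assumes a fitted multi-objective
    ``modelbridge`` and a list of ``obs_feats``)::

        observations, f, obj_w, obj_t = get_pareto_frontier_and_configs(
            modelbridge=modelbridge,
            observation_features=obs_feats,
        )
        # `observations` are the Pareto-optimal points; `f`, `obj_w` and
        # `obj_t` are the tensors used by the frontier evaluator.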
"""
if transform_outcomes_and_configs is None:
warnings.warn(
"FYI: The default behavior of `get_pareto_frontier_and_configs` when "
"`transform_outcomes_and_configs` is not specified has changed. Previously,"
" the default was `transform_outcomes_and_configs=True`; now this argument "
"is deprecated and behavior is as if "
"`transform_outcomes_and_configs=False`. You did not specify "
"`transform_outcomes_and_configs`, so this warning requires no action."
)
elif transform_outcomes_and_configs:
raise UnsupportedError(
"`transform_outcomes_and_configs=True` is no longer supported, and the "
"`transform_outcomes_and_configs` argument is deprecated. Please do not "
"specify this argument."
)
else:
warnings.warn(
"You passed `transform_outcomes_and_configs=False`. Specifying "
"`transform_outcomes_and_configs` at all is deprecated because `False` is "
"now the only allowed behavior. In the future, this will become an error.",
DeprecationWarning,
)
# Input validation
if use_model_predictions:
if observation_data is not None:
warnings.warn(
"You provided `observation_data` when `use_model_predictions` is True; "
"`observation_data` will not be used."
)
else:
if observation_data is None:
raise ValueError(
"`observation_data` must not be None when `use_model_predictions` is "
"True."
)
array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
if use_model_predictions:
observation_data = modelbridge._predict_observation_data(
observation_features=observation_features
)
Y, Yvar = observation_data_to_array(
outcomes=modelbridge.outcomes, observation_data=not_none(observation_data)
)
Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))
if arm_names is None:
arm_names = [None] * len(observation_features)
# Extract optimization config: make sure that the problem is a MOO
# problem and clone the optimization config with specified
# `objective_thresholds` if those are provided. If `optimization_config`
# is not specified, uses the one stored on `modelbridge`.
optimization_config = _get_multiobjective_optimization_config(
modelbridge=modelbridge,
optimization_config=optimization_config,
objective_thresholds=objective_thresholds,
)
# Transform optimization config.
fixed_features = ObservationFeatures(parameters={})
# de-relativize outcome constraints and objective thresholds
observations = modelbridge.get_training_data()
tf = Derelativize(
search_space=modelbridge.model_space.clone(),
observations=observations,
config={"use_raw_status_quo": True},
)
# pyre-ignore [9]
optimization_config = tf.transform_optimization_config(
optimization_config=optimization_config.clone(),
modelbridge=modelbridge,
fixed_features=fixed_features,
)
# Extract weights, constraints, and objective_thresholds
objective_weights = extract_objective_weights(
objective=optimization_config.objective, outcomes=modelbridge.outcomes
)
outcome_constraints = extract_outcome_constraints(
outcome_constraints=optimization_config.outcome_constraints,
outcomes=modelbridge.outcomes,
)
obj_t = extract_objective_thresholds(
objective_thresholds=optimization_config.objective_thresholds,
objective=optimization_config.objective,
outcomes=modelbridge.outcomes,
)
obj_t = array_to_tensor(obj_t)
# Transform to tensors.
obj_w, oc_c, _, _, _ = validate_and_apply_final_transform(
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=None,
pending_observations=None,
final_transform=array_to_tensor,
)
f, cov, indx = pareto_frontier_evaluator(
model=None,
X=None,
Y=Y,
Yvar=Yvar,
objective_thresholds=obj_t,
objective_weights=obj_w,
outcome_constraints=oc_c,
)
f, cov = f.detach().cpu().clone(), cov.detach().cpu().clone()
indx = indx.tolist()
frontier_observation_data = array_to_observation_data(
f=f.numpy(), cov=cov.numpy(), outcomes=not_none(modelbridge.outcomes)
)
# Construct observations
frontier_observations = []
for i, obsd in enumerate(frontier_observation_data):
frontier_observations.append(
Observation(
features=deepcopy(observation_features[indx[i]]),
data=deepcopy(obsd),
arm_name=arm_names[indx[i]],
)
)
return frontier_observations, f, obj_w.cpu(), obj_t.cpu()
def pareto_frontier(
modelbridge: modelbridge_module.torch.TorchModelBridge,
observation_features: List[ObservationFeatures],
observation_data: Optional[List[ObservationData]] = None,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
arm_names: Optional[List[Optional[str]]] = None,
use_model_predictions: bool = True,
) -> List[Observation]:
"""Compute the list of points on the Pareto frontier as `Observation`-s
in the untransformed search space.
Args:
modelbridge: ``Modelbridge`` used to predict metrics outcomes.
observation_features: Observation features to consider for the Pareto
frontier.
        observation_data: Data used to compute the Pareto front; required and
            used only if ``use_model_predictions`` is ``False``.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
specified in ``optimization_config``, if necessary.
optimization_config: Multi-objective optimization config.
arm_names: Arm names for each observation in ``observation_features``.
use_model_predictions: If ``True``, will use model predictions at
``observation_features`` to compute Pareto front. If ``False``,
will use ``observation_data`` directly to compute Pareto front, ignoring
``observation_features``.
Returns: Points on the Pareto frontier as `Observation`-s.
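
    Example (illustrative sketch; assumes a fitted multi-objective
    ``modelbridge`` and a list of ``obs_feats``)::

        frontier = pareto_frontier(
            modelbridge=modelbridge,
            observation_features=obs_feats,
        )
        # Each element is an `Observation` on the (model-predicted) frontier.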
"""
return get_pareto_frontier_and_configs(
modelbridge=modelbridge,
observation_features=observation_features,
observation_data=observation_data,
objective_thresholds=objective_thresholds,
optimization_config=optimization_config,
arm_names=arm_names,
use_model_predictions=use_model_predictions,
)[0]
def predicted_pareto_frontier(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
observation_features: Optional[List[ObservationFeatures]] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
) -> List[Observation]:
"""Generate a Pareto frontier based on the posterior means of given
observation features. Given a model and optionally features to evaluate
(will use model training data if not specified), use the model to predict
which points lie on the Pareto frontier.
Args:
modelbridge: ``Modelbridge`` used to predict metrics outcomes.
        observation_features: Observation features to predict. If not provided,
            the model's training data is used.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
specified in ``optimization_config``, if necessary.
optimization_config: Multi-objective optimization config.
Returns:
Observations representing points on the Pareto frontier.
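
    Example (illustrative sketch; assumes a fitted multi-objective
    ``modelbridge``)::

        frontier = predicted_pareto_frontier(modelbridge=modelbridge)
        # Uses the model's training data as candidate points by default.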
"""
if observation_features is None:
observation_features, _, arm_names = _get_modelbridge_training_data(
modelbridge=modelbridge
)
else:
arm_names = None
if not observation_features:
raise ValueError(
"Must receive observation_features as input or the model must "
"have training data."
)
pareto_observations = pareto_frontier(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_features=observation_features,
optimization_config=optimization_config,
arm_names=arm_names,
)
return pareto_observations
def observed_pareto_frontier(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
) -> List[Observation]:
"""Generate a pareto frontier based on observed data. Given observed data
(sourced from model training data), return points on the Pareto frontier
as `Observation`-s.
Args:
modelbridge: ``Modelbridge`` that holds previous training data.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
in the optimization config, if needed.
optimization_config: Multi-objective optimization config.
Returns:
        Observations representing points on the Pareto frontier.
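
    Example (illustrative sketch; assumes a fitted multi-objective
    ``modelbridge``)::

        frontier = observed_pareto_frontier(modelbridge=modelbridge)
        # The frontier is computed from the raw observed outcomes in the
        # model's training data, without model predictions.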
"""
# Get observation_data from current training data
obs_feats, obs_data, arm_names = _get_modelbridge_training_data(
modelbridge=modelbridge
)
pareto_observations = pareto_frontier(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_data=obs_data,
observation_features=obs_feats,
optimization_config=optimization_config,
arm_names=arm_names,
use_model_predictions=False,
)
return pareto_observations
def hypervolume(
modelbridge: modelbridge_module.torch.TorchModelBridge,
observation_features: List[ObservationFeatures],
objective_thresholds: Optional[TRefPoint] = None,
observation_data: Optional[List[ObservationData]] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
selected_metrics: Optional[List[str]] = None,
use_model_predictions: bool = True,
) -> float:
"""Helper function that computes (feasible) hypervolume.
Args:
modelbridge: The modelbridge.
observation_features: The observation features for the in-sample arms.
objective_thresholds: The objective thresholds to be used for computing
the hypervolume. If None, these are extracted from the optimization
config.
observation_data: The observed outcomes for the in-sample arms.
optimization_config: The optimization config specifying the objectives,
            objective thresholds, and outcome constraints.
selected_metrics: A list of objective metric names specifying which
objectives to use in hypervolume computation. By default, all
objectives are used.
use_model_predictions: A boolean indicating whether to use model predictions
for determining the in-sample Pareto frontier instead of the raw observed
values.
Returns:
The (feasible) hypervolume.
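
    Example (illustrative sketch; assumes a fitted multi-objective
    ``modelbridge`` and in-sample ``obs_feats``)::

        hv = hypervolume(
            modelbridge=modelbridge,
            observation_features=obs_feats,
        )
        # Objective thresholds are taken from the optimization config when
        # they are not passed explicitly.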
"""
frontier_observations, f, obj_w, obj_t = get_pareto_frontier_and_configs(
modelbridge=modelbridge,
observation_features=observation_features,
observation_data=observation_data,
objective_thresholds=objective_thresholds,
optimization_config=optimization_config,
use_model_predictions=use_model_predictions,
)
if obj_t is None:
raise ValueError( # pragma: no cover
"Cannot compute hypervolume without having objective thresholds specified."
)
oc = _get_multiobjective_optimization_config(
modelbridge=modelbridge,
optimization_config=optimization_config,
objective_thresholds=objective_thresholds,
)
# Set to all metrics if unspecified
if selected_metrics is None:
selected_metrics = oc.objective.metric_names
# filter to only include objectives
else:
if any(m not in oc.objective.metric_names for m in selected_metrics):
raise ValueError("All selected metrics must be objectives.")
# Create a mask indicating selected metrics
selected_metrics_mask = torch.tensor(
[metric in selected_metrics for metric in modelbridge.outcomes],
dtype=torch.bool,
device=f.device,
)
# Apply appropriate weights and thresholds
obj, obj_t = get_weighted_mc_objective_and_objective_thresholds(
objective_weights=obj_w, objective_thresholds=not_none(obj_t)
)
f_t = obj(f)
obj_mask = obj_w.nonzero().view(-1)
selected_metrics_mask = selected_metrics_mask[obj_mask]
f_t = f_t[:, selected_metrics_mask]
obj_t = obj_t[selected_metrics_mask]
bd = DominatedPartitioning(ref_point=obj_t, Y=f_t)
return bd.compute_hypervolume().item()
def _get_multiobjective_optimization_config(
modelbridge: modelbridge_module.torch.TorchModelBridge,
optimization_config: Optional[OptimizationConfig] = None,
objective_thresholds: Optional[TRefPoint] = None,
) -> MultiObjectiveOptimizationConfig:
# Optimization_config
mooc = optimization_config or checked_cast_optional(
MultiObjectiveOptimizationConfig, modelbridge._optimization_config
)
if not mooc:
raise ValueError( # pragma: no cover
(
"Experiment must have an existing optimization_config "
"of type `MultiObjectiveOptimizationConfig` "
"or `optimization_config` must be passed as an argument."
)
)
if not isinstance(mooc, MultiObjectiveOptimizationConfig):
raise ValueError( # pragma: no cover
"optimization_config must be a MultiObjectiveOptimizationConfig."
)
if objective_thresholds:
mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)
return mooc
def predicted_hypervolume(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
observation_features: Optional[List[ObservationFeatures]] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
selected_metrics: Optional[List[str]] = None,
) -> float:
"""Calculate hypervolume of a pareto frontier based on the posterior means of
given observation features.
Given a model and features to evaluate calculate the hypervolume of the pareto
frontier formed from their predicted outcomes.
Args:
modelbridge: Modelbridge used to predict metrics outcomes.
objective_thresholds: point defining the origin of hyperrectangles that
can contribute to hypervolume.
observation_features: observation features to predict. Model's training
data used by default if unspecified.
optimization_config: Optimization config
selected_metrics: If specified, hypervolume will only be evaluated on
the specified subset of metrics. Otherwise, all metrics will be used.
Returns:
calculated hypervolume.
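
    Example (illustrative sketch; assumes a fitted multi-objective
    ``modelbridge``)::

        hv = predicted_hypervolume(modelbridge=modelbridge)
        # Hypervolume of the frontier formed by model-predicted outcomes at
        # the model's training points.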
"""
if observation_features is None:
(
observation_features,
_,
__,
) = _get_modelbridge_training_data( # pragma: no cover
modelbridge=modelbridge
)
if not observation_features:
raise ValueError(
"Must receive observation_features as input or the model must "
"have training data."
)
return hypervolume(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_features=observation_features,
optimization_config=optimization_config,
selected_metrics=selected_metrics,
)
def observed_hypervolume(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
selected_metrics: Optional[List[str]] = None,
) -> float:
"""Calculate hypervolume of a pareto frontier based on observed data.
Given observed data, return the hypervolume of the pareto frontier formed from
those outcomes.
Args:
modelbridge: Modelbridge that holds previous training data.
objective_thresholds: point defining the origin of hyperrectangles that
can contribute to hypervolume.
optimization_config: Optimization config
selected_metrics: If specified, hypervolume will only be evaluated on
the specified subset of metrics. Otherwise, all metrics will be used.
Returns:
(float) calculated hypervolume.
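
    Example (illustrative sketch; assumes a fitted multi-objective
    ``modelbridge``)::

        hv = observed_hypervolume(modelbridge=modelbridge)
        # Hypervolume of the frontier formed by the raw observed outcomes.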
"""
# Get observation_data from current training data.
obs_feats, obs_data, _ = _get_modelbridge_training_data(modelbridge=modelbridge)
return hypervolume(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_features=obs_feats,
observation_data=obs_data,
optimization_config=optimization_config,
selected_metrics=selected_metrics,
use_model_predictions=False,
)
def array_to_observation_data(
f: np.ndarray, cov: np.ndarray, outcomes: List[str]
) -> List[ObservationData]:
"""Convert arrays of model predictions to a list of ObservationData.
Args:
f: An (n x m) array
cov: An (n x m x m) array
        outcomes: A list of m outcome names
Returns: A list of n ObservationData
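
    Example (illustrative sketch; the array values are assumed)::

        f = np.array([[1.0, 2.0]])
        cov = np.array([[[0.1, 0.0], [0.0, 0.1]]])
        obs_data = array_to_observation_data(f=f, cov=cov, outcomes=["m1", "m2"])
        # obs_data[0].means -> array([1., 2.])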
"""
observation_data = []
for i in range(f.shape[0]):
observation_data.append(
ObservationData(
metric_names=list(outcomes),
means=f[i, :].copy(),
covariance=cov[i, :, :].copy(),
)
)
return observation_data
def observation_data_to_array(
outcomes: List[str],
observation_data: List[ObservationData],
) -> Tuple[np.ndarray, np.ndarray]:
"""Convert a list of Observation data to arrays.
    Args:
        outcomes: A list of m outcome names to extract, in the desired order.
        observation_data: A list of n ObservationData
    Returns:
        A tuple of two arrays:
            - means: An (n x m) array of means
            - cov: An (n x m x m) array of covariances
"""
means = []
cov = []
for obsd in observation_data:
metric_idxs = np.array([obsd.metric_names.index(m) for m in outcomes])
means.append(obsd.means[metric_idxs])
cov.append(obsd.covariance[metric_idxs][:, metric_idxs])
return np.array(means), np.array(cov)
def observation_features_to_array(
parameters: List[str], obsf: List[ObservationFeatures]
) -> np.ndarray:
"""Convert a list of Observation features to arrays."""
return np.array([[of.parameters[p] for p in parameters] for of in obsf])
def detect_duplicates(
X: Tensor,
rtol: float = 1e-5,
atol: float = 1e-8,
) -> Iterator[Tuple[int, int]]:
"""Returns an iterator over index pairs `(duplicate index, original index)` for all
duplicate entries of `X`.
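
    Example (illustrative sketch; the tensor values are assumed)::

        X = torch.tensor([[0.0, 0.0], [1.0, 1.0], [0.0, 0.0]])
        list(detect_duplicates(X=X))  # [(2, 0)]: row 2 duplicates row 0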
"""
tols = atol
if rtol:
rval = X.abs().max(dim=-1, keepdim=True).values
tols = tols + rtol * rval.max(rval.transpose(-1, -2))
n = X.shape[-2]
dist = torch.full((n, n), float("inf"), device=X.device, dtype=X.dtype)
dist[torch.triu_indices(n, n, offset=1).unbind()] = torch.nn.functional.pdist(
X, p=float("inf")
)
return (
(i, int(j))
# pyre-fixme[19]: Expected 1 positional argument.
for diff, j, i in zip(*(dist - tols).min(dim=-2), range(n))
if diff < 0
)
def feasible_hypervolume( # pragma: no cover
optimization_config: MultiObjectiveOptimizationConfig, values: Dict[str, np.ndarray]
) -> np.ndarray:
"""Compute the feasible hypervolume each iteration.
Args:
optimization_config: Optimization config.
values: Dictionary from metric name to array of value at each
iteration (each array is `n`-dim). If optimization config contains
outcome constraints, values for them must be present in `values`.
Returns: Array of feasible hypervolumes.
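
    Example (illustrative sketch; ``opt_config`` is an assumed
    ``MultiObjectiveOptimizationConfig`` with thresholds for metrics
    "m1" and "m2")::

        values = {"m1": np.array([1.0, 2.0]), "m2": np.array([0.5, 1.5])}
        hvs = feasible_hypervolume(optimization_config=opt_config, values=values)
        # hvs[i] is the feasible hypervolume after the first i + 1 points.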
"""
# Get objective at each iteration
obj_threshold_dict = {
ot.metric.name: ot.bound for ot in optimization_config.objective_thresholds
}
f_vals = np.hstack(
[values[m.name].reshape(-1, 1) for m in optimization_config.objective.metrics]
)
obj_thresholds = np.array(
[obj_threshold_dict[m.name] for m in optimization_config.objective.metrics]
)
# Set infeasible points to be the objective threshold
for oc in optimization_config.outcome_constraints:
if oc.relative:
raise ValueError( # pragma: no cover
"Benchmark aggregation does not support relative constraints"
)
g = values[oc.metric.name]
feas = g <= oc.bound if oc.op == ComparisonOp.LEQ else g >= oc.bound
f_vals[~feas] = obj_thresholds
obj_weights = np.array(
[-1 if m.lower_is_better else 1 for m in optimization_config.objective.metrics]
)
obj_thresholds = obj_thresholds * obj_weights
f_vals = f_vals * obj_weights
partitioning = DominatedPartitioning(
ref_point=torch.from_numpy(obj_thresholds).double()
)
f_vals_torch = torch.from_numpy(f_vals).double()
# compute hv at each iteration
hvs = []
for i in range(f_vals.shape[0]):
# update with new point
partitioning.update(Y=f_vals_torch[i : i + 1])
hv = partitioning.compute_hypervolume().item()
hvs.append(hv)
return np.array(hvs)
def _array_to_tensor(
array: Union[np.ndarray, List[float]],
modelbridge: Optional[modelbridge_module.base.ModelBridge] = None,
) -> Tensor:
if modelbridge and hasattr(modelbridge, "_array_to_tensor"):
# pyre-ignore[16]: modelbridge does not have attribute `_array_to_tensor`
return modelbridge._array_to_tensor(array)
else:
return torch.tensor(array)
def _get_modelbridge_training_data(
modelbridge: modelbridge_module.torch.TorchModelBridge,
) -> Tuple[List[ObservationFeatures], List[ObservationData], List[Optional[str]]]:
obs = modelbridge.get_training_data()
return _unpack_observations(obs=obs)
def _unpack_observations(
obs: List[Observation],
) -> Tuple[List[ObservationFeatures], List[ObservationData], List[Optional[str]]]:
obs_feats, obs_data, arm_names = [], [], []
for ob in obs:
obs_feats.append(ob.features)
obs_data.append(ob.data)
arm_names.append(ob.arm_name)
return obs_feats, obs_data, arm_names