#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
from __future__ import annotations
import warnings
from copy import deepcopy
from functools import partial
from logging import Logger
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Tuple,
Type,
TYPE_CHECKING,
Union,
)
import numpy as np
import torch
from ax.core.data import Data
from ax.core.experiment import Experiment
from ax.core.objective import MultiObjective, Objective, ScalarizedObjective
from ax.core.observation import Observation, ObservationData, ObservationFeatures
from ax.core.optimization_config import (
MultiObjectiveOptimizationConfig,
OptimizationConfig,
TRefPoint,
)
from ax.core.outcome_constraint import (
ComparisonOp,
OutcomeConstraint,
ScalarizedOutcomeConstraint,
)
from ax.core.parameter import ChoiceParameter, Parameter, ParameterType, RangeParameter
from ax.core.parameter_constraint import ParameterConstraint
from ax.core.risk_measures import RiskMeasure
from ax.core.search_space import (
RobustSearchSpace,
RobustSearchSpaceDigest,
SearchSpace,
SearchSpaceDigest,
)
from ax.core.types import TBounds, TCandidateMetadata
from ax.core.utils import ( # noqa F402: Temporary import for backward compatibility.
get_pending_observation_features, # noqa F401
get_pending_observation_features_based_on_trial_status, # noqa F401
)
from ax.exceptions.core import DataRequiredError, UserInputError
from ax.modelbridge.transforms.base import Transform
from ax.modelbridge.transforms.utils import (
derelativize_optimization_config_with_raw_status_quo,
)
from ax.models.torch.botorch_moo_defaults import pareto_frontier_evaluator
from ax.models.torch.frontier_utils import (
get_weighted_mc_objective_and_objective_thresholds,
)
from ax.utils.common.logger import get_logger
from ax.utils.common.typeutils import (
checked_cast,
checked_cast_optional,
checked_cast_to_tuple,
not_none,
)
from botorch.acquisition.multi_objective.multi_output_risk_measures import (
IndependentCVaR,
IndependentVaR,
MARS,
MultiOutputExpectation,
MVaR,
)
from botorch.acquisition.risk_measures import (
CVaR,
Expectation,
RiskMeasureMCObjective,
VaR,
WorstCase,
)
from botorch.utils.datasets import ContextualDataset, SupervisedDataset
from botorch.utils.multi_objective.box_decompositions.dominated import (
DominatedPartitioning,
)
from torch import Tensor
logger: Logger = get_logger(__name__)
if TYPE_CHECKING:
# import as module to make sphinx-autodoc-typehints happy
from ax import modelbridge as modelbridge_module # noqa F401
"""A mapping of risk measure names to the corresponding classes.
NOTE: This can be extended with user-defined risk measure classes by
importing the dictionary and adding the new risk measure class as
`RISK_MEASURE_NAME_TO_CLASS["my_risk_measure"] = MyRiskMeasure`.
An example of this is found in `tests/test_risk_measure`.
"""
RISK_MEASURE_NAME_TO_CLASS: Dict[str, Type[RiskMeasureMCObjective]] = {
"Expectation": Expectation,
"CVaR": CVaR,
"MARS": MARS,
"MVaR": MVaR,
"IndependentCVaR": IndependentCVaR,
"IndependentVaR": IndependentVaR,
"MultiOutputExpectation": MultiOutputExpectation,
"VaR": VaR,
"WorstCase": WorstCase,
}
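# Example (illustrative): a user-defined risk measure can be registered as
# described in the docstring above; `MyRiskMeasure` is a hypothetical subclass
# of `RiskMeasureMCObjective`:
#     RISK_MEASURE_NAME_TO_CLASS["my_risk_measure"] = MyRiskMeasure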
def check_has_multi_objective_and_data(
experiment: Experiment,
data: Data,
optimization_config: Optional[OptimizationConfig] = None,
) -> None:
"""Raise an error if not using a `MultiObjective` or if the data is empty."""
optimization_config = not_none(
optimization_config or experiment.optimization_config
)
if not isinstance(optimization_config.objective, MultiObjective):
raise ValueError("Multi-objective optimization requires multiple objectives.")
if data.df.empty:
raise ValueError("MultiObjectiveOptimization requires non-empty data.")
def get_fixed_features(
fixed_features: Optional[ObservationFeatures], param_names: List[str]
) -> Optional[Dict[int, float]]:
"""Reformat a set of fixed_features."""
if fixed_features is None:
return None
fixed_features_dict = {}
for p_name, val in fixed_features.parameters.items():
# These all need to be floats at this point.
# pyre-ignore[6]: All float here.
val_ = float(val)
fixed_features_dict[param_names.index(p_name)] = val_
fixed_features_dict = fixed_features_dict if len(fixed_features_dict) > 0 else None
return fixed_features_dict
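# Example (illustrative): with param_names=["x0", "x1", "x2"] and
# fixed_features=ObservationFeatures(parameters={"x1": 0.5}), get_fixed_features
# returns {1: 0.5}, i.e. values keyed by index into param_names; an empty
# parameters dict yields None.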
def get_fixed_features_from_experiment(
experiment: Experiment,
) -> ObservationFeatures:
completed_indices = [t.index for t in experiment.completed_trials]
completed_indices.append(0) # handle case of no completed trials
return ObservationFeatures(
parameters={},
trial_index=max(completed_indices),
)
def pending_observations_as_array_list(
pending_observations: Dict[str, List[ObservationFeatures]],
outcome_names: List[str],
param_names: List[str],
) -> Optional[List[np.ndarray]]:
"""Re-format pending observations.
Args:
        pending_observations: A mapping from metric name to a list of pending
            `ObservationFeatures` for that metric.
        outcome_names: List of outcome names.
        param_names: List of fitted parameter names.
Returns:
Filtered pending observations data, by outcome and param names.
"""
if len(pending_observations) == 0:
return None
pending = [np.array([]) for _ in outcome_names]
for metric_name, po_list in pending_observations.items():
        # Some metrics attached to the experiment may not belong in the pending
        # features for a given model: the model may have been fit to initial
        # data that is missing some experiment metrics, or it may simply not be
        # fit for some of them. Metrics that appear in pending_observations
        # (drawn from the experiment) but not in outcome_names (the metrics the
        # model expects) are therefore filtered out.
if metric_name not in outcome_names:
continue
pending[outcome_names.index(metric_name)] = np.array(
[[po.parameters[p] for p in param_names] for po in po_list]
)
return pending
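# Example (illustrative): with outcome_names=["m1", "m2"], param_names=["x0", "x1"]
# and two ObservationFeatures pending for "m1" only, the result is a list of two
# arrays: a (2 x 2) array of parameter values for "m1" and an empty array for "m2".
# Pending metrics that are not in outcome_names are dropped.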
def parse_observation_features(
X: np.ndarray,
param_names: List[str],
candidate_metadata: Optional[List[TCandidateMetadata]] = None,
) -> List[ObservationFeatures]:
"""Re-format raw model-generated candidates into ObservationFeatures.
Args:
param_names: List of param names.
X: Raw np.ndarray of candidate values.
candidate_metadata: Model's metadata for candidates it produced.
Returns:
List of candidates, represented as ObservationFeatures.
"""
if candidate_metadata and len(candidate_metadata) != len(X):
raise ValueError(
"Observations metadata list provided is not of "
"the same size as the number of candidates."
)
observation_features = []
for i, x in enumerate(X):
observation_features.append(
ObservationFeatures(
parameters=dict(zip(param_names, x)),
metadata=candidate_metadata[i] if candidate_metadata else None,
)
)
return observation_features
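# Example (illustrative): parse_observation_features(X=np.array([[0.1, 0.2]]),
# param_names=["x0", "x1"]) returns a single ObservationFeatures with
# parameters={"x0": 0.1, "x1": 0.2} and no metadata.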
def get_pareto_frontier_and_configs(
modelbridge: modelbridge_module.torch.TorchModelBridge,
observation_features: List[ObservationFeatures],
observation_data: Optional[List[ObservationData]] = None,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
arm_names: Optional[List[Optional[str]]] = None,
use_model_predictions: bool = True,
) -> Tuple[List[Observation], Tensor, Tensor, Optional[Tensor]]:
"""Helper that applies transforms and calls ``frontier_evaluator``.
Returns the ``frontier_evaluator`` configs in addition to the Pareto
observations.
Args:
modelbridge: ``Modelbridge`` used to predict metrics outcomes.
observation_features: Observation features to consider for the Pareto
frontier.
        observation_data: Data used to compute the Pareto front; required (and
            used) only if ``use_model_predictions is False``.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
specified in ``optimization_config``, if necessary.
optimization_config: Multi-objective optimization config.
arm_names: Arm names for each observation in ``observation_features``.
use_model_predictions: If ``True``, will use model predictions at
``observation_features`` to compute Pareto front. If ``False``,
will use ``observation_data`` directly to compute Pareto front, ignoring
``observation_features``.
    Returns: Four-item tuple of:
          - frontier_observations: Observations of points on the Pareto frontier,
          - f: n x m tensor representation of the Pareto frontier values, where n
            is the length of frontier_observations and m is the number of metrics,
          - obj_w: m tensor of objective weights,
          - obj_t: m tensor of objective thresholds corresponding to ``f``, or
            None if no objective thresholds are used.
"""
# Input validation
if use_model_predictions:
if observation_data is not None:
warnings.warn(
"You provided `observation_data` when `use_model_predictions` is True; "
"`observation_data` will not be used.",
stacklevel=2,
)
else:
if observation_data is None:
raise ValueError(
"`observation_data` must not be None when `use_model_predictions` is "
"True."
)
array_to_tensor = partial(_array_to_tensor, modelbridge=modelbridge)
if use_model_predictions:
observation_data = modelbridge._predict_observation_data(
observation_features=observation_features
)
Y, Yvar = observation_data_to_array(
outcomes=modelbridge.outcomes, observation_data=not_none(observation_data)
)
Y, Yvar = (array_to_tensor(Y), array_to_tensor(Yvar))
if arm_names is None:
arm_names = [None] * len(observation_features)
# Extract optimization config: make sure that the problem is a MOO
# problem and clone the optimization config with specified
# `objective_thresholds` if those are provided. If `optimization_config`
# is not specified, uses the one stored on `modelbridge`.
optimization_config = _get_multiobjective_optimization_config(
modelbridge=modelbridge,
optimization_config=optimization_config,
objective_thresholds=objective_thresholds,
)
# Transform optimization config.
# de-relativize outcome constraints and objective thresholds
observations = modelbridge.get_training_data()
optimization_config = checked_cast(
MultiObjectiveOptimizationConfig,
derelativize_optimization_config_with_raw_status_quo(
optimization_config=optimization_config,
modelbridge=modelbridge,
observations=observations,
),
)
# Extract weights, constraints, and objective_thresholds
objective_weights = extract_objective_weights(
objective=optimization_config.objective, outcomes=modelbridge.outcomes
)
outcome_constraints = extract_outcome_constraints(
outcome_constraints=optimization_config.outcome_constraints,
outcomes=modelbridge.outcomes,
)
obj_t = extract_objective_thresholds(
objective_thresholds=optimization_config.objective_thresholds,
objective=optimization_config.objective,
outcomes=modelbridge.outcomes,
)
if obj_t is not None:
obj_t = array_to_tensor(obj_t)
# Transform to tensors.
obj_w, oc_c, _, _, _ = validate_and_apply_final_transform(
objective_weights=objective_weights,
outcome_constraints=outcome_constraints,
linear_constraints=None,
pending_observations=None,
final_transform=array_to_tensor,
)
f, cov, indx = pareto_frontier_evaluator(
model=None,
X=None,
Y=Y,
Yvar=Yvar,
objective_thresholds=obj_t,
objective_weights=obj_w,
outcome_constraints=oc_c,
)
f, cov = f.detach().cpu().clone(), cov.detach().cpu().clone()
indx = indx.tolist()
frontier_observation_data = array_to_observation_data(
f=f.numpy(), cov=cov.numpy(), outcomes=not_none(modelbridge.outcomes)
)
# Construct observations
frontier_observations = []
for i, obsd in enumerate(frontier_observation_data):
frontier_observations.append(
Observation(
features=deepcopy(observation_features[indx[i]]),
data=deepcopy(obsd),
arm_name=arm_names[indx[i]],
)
)
return (
frontier_observations,
f,
obj_w.cpu(),
obj_t.cpu() if obj_t is not None else None,
)
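# Usage sketch (illustrative; assumes a fitted multi-objective TorchModelBridge
# named `mb`), computing the frontier from observed training data:
#     obs_feats, obs_data, arm_names = _get_modelbridge_training_data(modelbridge=mb)
#     frontier_obs, f, obj_w, obj_t = get_pareto_frontier_and_configs(
#         modelbridge=mb,
#         observation_features=obs_feats,
#         observation_data=obs_data,
#         arm_names=arm_names,
#         use_model_predictions=False,
#     )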
def pareto_frontier(
modelbridge: modelbridge_module.torch.TorchModelBridge,
observation_features: List[ObservationFeatures],
observation_data: Optional[List[ObservationData]] = None,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
arm_names: Optional[List[Optional[str]]] = None,
use_model_predictions: bool = True,
) -> List[Observation]:
"""Compute the list of points on the Pareto frontier as `Observation`-s
in the untransformed search space.
Args:
modelbridge: ``Modelbridge`` used to predict metrics outcomes.
observation_features: Observation features to consider for the Pareto
frontier.
        observation_data: Data used to compute the Pareto front; required (and
            used) only if ``use_model_predictions is False``.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
specified in ``optimization_config``, if necessary.
optimization_config: Multi-objective optimization config.
arm_names: Arm names for each observation in ``observation_features``.
use_model_predictions: If ``True``, will use model predictions at
``observation_features`` to compute Pareto front. If ``False``,
will use ``observation_data`` directly to compute Pareto front, ignoring
``observation_features``.
    Returns: Points on the Pareto frontier as `Observation`-s, sorted in
        descending order of individual hypervolume when objective thresholds
        are available (otherwise in arbitrary order).
"""
frontier_observations, f, obj_w, obj_t = get_pareto_frontier_and_configs(
modelbridge=modelbridge,
observation_features=observation_features,
observation_data=observation_data,
objective_thresholds=objective_thresholds,
optimization_config=optimization_config,
arm_names=arm_names,
use_model_predictions=use_model_predictions,
)
# If no objective thresholds are present we cannot compute hypervolume -- return
# frontier observations in arbitrary order
if obj_t is None:
return frontier_observations
# Apply appropriate weights and thresholds
obj, obj_t = get_weighted_mc_objective_and_objective_thresholds(
objective_weights=obj_w, objective_thresholds=obj_t
)
f_t = obj(f)
    # Compute individual hypervolumes by taking the difference between each
    # observation and the reference point, clamping at zero, and taking the
    # product across objectives.
individual_hypervolumes = (
(f_t.unsqueeze(dim=0) - obj_t).clamp_min(0).prod(dim=-1).squeeze().tolist()
)
if not isinstance(individual_hypervolumes, list):
individual_hypervolumes = [individual_hypervolumes]
return [
obs
for obs, _ in sorted(
zip(frontier_observations, individual_hypervolumes),
key=lambda tup: tup[1],
reverse=True,
)
]
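# Example (illustrative): for a maximization problem with objective thresholds
# (0, 0), a frontier point with weighted objective values (2, 3) has an
# individual hypervolume of (2 - 0) * (3 - 0) = 6 and therefore sorts ahead of
# a point with values (1, 4), whose individual hypervolume is 4.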
def predicted_pareto_frontier(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
observation_features: Optional[List[ObservationFeatures]] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
) -> List[Observation]:
"""Generate a Pareto frontier based on the posterior means of given
observation features. Given a model and optionally features to evaluate
(will use model training data if not specified), use the model to predict
which points lie on the Pareto frontier.
Args:
modelbridge: ``Modelbridge`` used to predict metrics outcomes.
        observation_features: Observation features to predict. The model's
            training data is used if unspecified.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
specified in ``optimization_config``, if necessary.
optimization_config: Multi-objective optimization config.
Returns:
Observations representing points on the Pareto frontier.
"""
if observation_features is None:
observation_features, _, arm_names = _get_modelbridge_training_data(
modelbridge=modelbridge
)
else:
arm_names = None
if not observation_features:
raise ValueError(
"Must receive observation_features as input or the model must "
"have training data."
)
pareto_observations = pareto_frontier(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_features=observation_features,
optimization_config=optimization_config,
arm_names=arm_names,
)
return pareto_observations
def observed_pareto_frontier(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
) -> List[Observation]:
"""Generate a pareto frontier based on observed data. Given observed data
(sourced from model training data), return points on the Pareto frontier
as `Observation`-s.
Args:
modelbridge: ``Modelbridge`` that holds previous training data.
objective_thresholds: Metric values bounding the region of interest in
the objective outcome space; used to override objective thresholds
in the optimization config, if needed.
optimization_config: Multi-objective optimization config.
Returns:
        Points on the Pareto frontier, as `Observation`-s.
"""
# Get observation_data from current training data
obs_feats, obs_data, arm_names = _get_modelbridge_training_data(
modelbridge=modelbridge
)
pareto_observations = pareto_frontier(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_data=obs_data,
observation_features=obs_feats,
optimization_config=optimization_config,
arm_names=arm_names,
use_model_predictions=False,
)
return pareto_observations
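# Usage sketch (illustrative; assumes a fitted multi-objective TorchModelBridge
# named `mb`): observed_pareto_frontier(modelbridge=mb) returns the non-dominated
# subset of the model's training data as Observation-s, using raw observed
# outcomes rather than model predictions.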
def hypervolume(
modelbridge: modelbridge_module.torch.TorchModelBridge,
observation_features: List[ObservationFeatures],
objective_thresholds: Optional[TRefPoint] = None,
observation_data: Optional[List[ObservationData]] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
selected_metrics: Optional[List[str]] = None,
use_model_predictions: bool = True,
) -> float:
"""Helper function that computes (feasible) hypervolume.
Args:
modelbridge: The modelbridge.
observation_features: The observation features for the in-sample arms.
objective_thresholds: The objective thresholds to be used for computing
the hypervolume. If None, these are extracted from the optimization
config.
observation_data: The observed outcomes for the in-sample arms.
optimization_config: The optimization config specifying the objectives,
            objective thresholds, and outcome constraints.
selected_metrics: A list of objective metric names specifying which
objectives to use in hypervolume computation. By default, all
objectives are used.
use_model_predictions: A boolean indicating whether to use model predictions
for determining the in-sample Pareto frontier instead of the raw observed
values.
Returns:
The (feasible) hypervolume.
"""
frontier_observations, f, obj_w, obj_t = get_pareto_frontier_and_configs(
modelbridge=modelbridge,
observation_features=observation_features,
observation_data=observation_data,
objective_thresholds=objective_thresholds,
optimization_config=optimization_config,
use_model_predictions=use_model_predictions,
)
if obj_t is None:
raise ValueError(
"Cannot compute hypervolume without having objective thresholds specified."
)
oc = _get_multiobjective_optimization_config(
modelbridge=modelbridge,
optimization_config=optimization_config,
objective_thresholds=objective_thresholds,
)
# Set to all metrics if unspecified
if selected_metrics is None:
selected_metrics = oc.objective.metric_names
    # Otherwise, validate that all selected metrics are objectives
else:
if any(m not in oc.objective.metric_names for m in selected_metrics):
raise ValueError("All selected metrics must be objectives.")
# Create a mask indicating selected metrics
selected_metrics_mask = torch.tensor(
[metric in selected_metrics for metric in modelbridge.outcomes],
dtype=torch.bool,
device=f.device,
)
# Apply appropriate weights and thresholds
obj, obj_t = get_weighted_mc_objective_and_objective_thresholds(
objective_weights=obj_w, objective_thresholds=not_none(obj_t)
)
f_t = obj(f)
obj_mask = obj_w.nonzero().view(-1)
selected_metrics_mask = selected_metrics_mask[obj_mask]
f_t = f_t[:, selected_metrics_mask]
obj_t = obj_t[selected_metrics_mask]
bd = DominatedPartitioning(ref_point=obj_t, Y=f_t)
return bd.compute_hypervolume().item()
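# Usage sketch (illustrative; assumes a fitted multi-objective TorchModelBridge
# named `mb` whose optimization config specifies objective thresholds):
#     hv = hypervolume(
#         modelbridge=mb,
#         observation_features=[obs.features for obs in mb.get_training_data()],
#     )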
def _get_multiobjective_optimization_config(
modelbridge: modelbridge_module.torch.TorchModelBridge,
optimization_config: Optional[OptimizationConfig] = None,
objective_thresholds: Optional[TRefPoint] = None,
) -> MultiObjectiveOptimizationConfig:
    # Use the optimization config passed as an argument if provided;
    # otherwise, fall back to the one stored on the modelbridge.
mooc = optimization_config or checked_cast_optional(
MultiObjectiveOptimizationConfig, modelbridge._optimization_config
)
if not mooc:
raise ValueError(
(
"Experiment must have an existing optimization_config "
"of type `MultiObjectiveOptimizationConfig` "
"or `optimization_config` must be passed as an argument."
)
)
if not isinstance(mooc, MultiObjectiveOptimizationConfig):
raise ValueError(
"optimization_config must be a MultiObjectiveOptimizationConfig."
)
if objective_thresholds:
mooc = mooc.clone_with_args(objective_thresholds=objective_thresholds)
return mooc
def predicted_hypervolume(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
observation_features: Optional[List[ObservationFeatures]] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
selected_metrics: Optional[List[str]] = None,
) -> float:
"""Calculate hypervolume of a pareto frontier based on the posterior means of
given observation features.
Given a model and features to evaluate calculate the hypervolume of the pareto
frontier formed from their predicted outcomes.
Args:
modelbridge: Modelbridge used to predict metrics outcomes.
        objective_thresholds: Point defining the origin of hyperrectangles that
            can contribute to hypervolume.
        observation_features: Observation features to predict. Model's training
            data is used by default if unspecified.
        optimization_config: Optimization config.
selected_metrics: If specified, hypervolume will only be evaluated on
the specified subset of metrics. Otherwise, all metrics will be used.
Returns:
        The calculated hypervolume.
"""
if observation_features is None:
(
observation_features,
_,
__,
) = _get_modelbridge_training_data(modelbridge=modelbridge)
if not observation_features:
raise ValueError(
"Must receive observation_features as input or the model must "
"have training data."
)
return hypervolume(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_features=observation_features,
optimization_config=optimization_config,
selected_metrics=selected_metrics,
)
def observed_hypervolume(
modelbridge: modelbridge_module.torch.TorchModelBridge,
objective_thresholds: Optional[TRefPoint] = None,
optimization_config: Optional[MultiObjectiveOptimizationConfig] = None,
selected_metrics: Optional[List[str]] = None,
) -> float:
"""Calculate hypervolume of a pareto frontier based on observed data.
Given observed data, return the hypervolume of the pareto frontier formed from
those outcomes.
Args:
modelbridge: Modelbridge that holds previous training data.
objective_thresholds: Point defining the origin of hyperrectangles that
can contribute to hypervolume. Note that if this is None,
`objective_thresholds` must be present on the
`modelbridge.optimization_config`.
optimization_config: Optimization config
selected_metrics: If specified, hypervolume will only be evaluated on
the specified subset of metrics. Otherwise, all metrics will be used.
Returns:
        The calculated hypervolume.
"""
# Get observation_data from current training data.
obs_feats, obs_data, _ = _get_modelbridge_training_data(modelbridge=modelbridge)
return hypervolume(
modelbridge=modelbridge,
objective_thresholds=objective_thresholds,
observation_features=obs_feats,
observation_data=obs_data,
optimization_config=optimization_config,
selected_metrics=selected_metrics,
use_model_predictions=False,
)
def array_to_observation_data(
f: np.ndarray, cov: np.ndarray, outcomes: List[str]
) -> List[ObservationData]:
"""Convert arrays of model predictions to a list of ObservationData.
Args:
f: An (n x m) array
cov: An (n x m x m) array
        outcomes: A list of m outcome names
Returns: A list of n ObservationData
"""
observation_data = []
for i in range(f.shape[0]):
observation_data.append(
ObservationData(
metric_names=list(outcomes),
means=f[i, :].copy(),
covariance=cov[i, :, :].copy(),
)
)
return observation_data
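# Example (illustrative): for n=1 point and outcomes=["m1", "m2"], f has shape
# (1, 2) and cov has shape (1, 2, 2); the result is a single ObservationData
# whose means are f[0] and whose covariance is cov[0].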
def observation_data_to_array(
outcomes: List[str],
observation_data: List[ObservationData],
) -> Tuple[np.ndarray, np.ndarray]:
"""Convert a list of Observation data to arrays.
Args:
observation_data: A list of n ObservationData
Returns:
An array of n ObservationData, each containing
- f: An (n x m) array
- cov: An (n x m x m) array
"""
means = []
cov = []
for obsd in observation_data:
try:
metric_idxs = np.array([obsd.metric_names.index(m) for m in outcomes])
except ValueError:
missing = set(outcomes).difference(set(obsd.metric_names))
logger.warning(
f"{obsd} is missing the metrics {missing}. Ignoring the data "
"for the remaining metrics."
)
continue
means.append(obsd.means[metric_idxs])
cov.append(obsd.covariance[metric_idxs][:, metric_idxs])
return np.array(means), np.array(cov)
def observation_features_to_array(
parameters: List[str], obsf: List[ObservationFeatures]
) -> np.ndarray:
"""Convert a list of Observation features to arrays."""
return np.array([[of.parameters[p] for p in parameters] for of in obsf])
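# Example (illustrative): observation_features_to_array(["x0", "x1"], obsf)
# returns an (n x 2) array with one row of parameter values per observation
# in obsf.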
def feasible_hypervolume(
optimization_config: MultiObjectiveOptimizationConfig, values: Dict[str, np.ndarray]
) -> np.ndarray:
"""Compute the feasible hypervolume each iteration.
Args:
optimization_config: Optimization config.
values: Dictionary from metric name to array of value at each
iteration (each array is `n`-dim). If optimization config contains
outcome constraints, values for them must be present in `values`.
Returns: Array of feasible hypervolumes.
"""
# Get objective at each iteration
obj_threshold_dict = {
ot.metric.name: ot.bound for ot in optimization_config.objective_thresholds
}
f_vals = np.hstack(
[values[m.name].reshape(-1, 1) for m in optimization_config.objective.metrics]
)
obj_thresholds = np.array(
[obj_threshold_dict[m.name] for m in optimization_config.objective.metrics]
)
# Set infeasible points to be the objective threshold
for oc in optimization_config.outcome_constraints:
if oc.relative:
raise ValueError(
"Benchmark aggregation does not support relative constraints"
)
g = values[oc.metric.name]
feas = g <= oc.bound if oc.op == ComparisonOp.LEQ else g >= oc.bound
f_vals[~feas] = obj_thresholds
obj_weights = np.array(
[-1 if m.lower_is_better else 1 for m in optimization_config.objective.metrics]
)
obj_thresholds = obj_thresholds * obj_weights
f_vals = f_vals * obj_weights
partitioning = DominatedPartitioning(
ref_point=torch.from_numpy(obj_thresholds).double()
)
f_vals_torch = torch.from_numpy(f_vals).double()
# compute hv at each iteration
hvs = []
for i in range(f_vals.shape[0]):
# update with new point
partitioning.update(Y=f_vals_torch[i : i + 1])
hv = partitioning.compute_hypervolume().item()
hvs.append(hv)
return np.array(hvs)
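# Example (illustrative): with two maximized objectives thresholded at (0, 0),
# no outcome constraints, and values={"m1": np.array([1.0, 2.0]),
# "m2": np.array([1.0, 1.0])}, the running feasible hypervolume is [1.0, 2.0]:
# the second point (2, 1) dominates the first (1, 1), growing the dominated
# volume from 1 * 1 = 1 to 2 * 1 = 2.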
def _array_to_tensor(
array: Union[np.ndarray, List[float]],
modelbridge: Optional[modelbridge_module.base.ModelBridge] = None,
) -> Tensor:
if modelbridge and hasattr(modelbridge, "_array_to_tensor"):
# pyre-ignore[16]: modelbridge does not have attribute `_array_to_tensor`
return modelbridge._array_to_tensor(array)
else:
return torch.tensor(array)
def _get_modelbridge_training_data(
modelbridge: modelbridge_module.torch.TorchModelBridge,
) -> Tuple[List[ObservationFeatures], List[ObservationData], List[Optional[str]]]:
obs = modelbridge.get_training_data()
return _unpack_observations(obs=obs)
def _unpack_observations(
obs: List[Observation],
) -> Tuple[List[ObservationFeatures], List[ObservationData], List[Optional[str]]]:
obs_feats, obs_data, arm_names = [], [], []
for ob in obs:
obs_feats.append(ob.features)
obs_data.append(ob.data)
arm_names.append(ob.arm_name)
return obs_feats, obs_data, arm_names
def process_contextual_datasets(
datasets: List[SupervisedDataset],
outcomes: List[str],
parameter_decomposition: Dict[str, List[str]],
metric_decomposition: Optional[Dict[str, List[str]]] = None,
) -> List[ContextualDataset]:
"""Contruct a list of `ContextualDataset`.
Args:
datasets: A list of `Dataset` objects.
outcomes: The names of the outcomes to extract observations for.
parameter_decomposition: Keys are context names. Values are the lists
of parameter names belonging to the context, e.g.
{'context1': ['p1_c1', 'p2_c1'],'context2': ['p1_c2', 'p2_c2']}.
metric_decomposition: Context breakdown metrics. Keys are context names.
Values are the lists of metric names belonging to the context:
{
'context1': ['m1_c1', 'm2_c1', 'm3_c1'],
'context2': ['m1_c2', 'm2_c2', 'm3_c2'],
}
Returns: A list of `ContextualDataset` objects. Order generally will not be that of
`outcomes`.
"""
context_buckets = list(parameter_decomposition.keys())
remaining_metrics = deepcopy(outcomes)
contextual_datasets = []
if metric_decomposition is not None:
M = len(metric_decomposition[context_buckets[0]])
for j in range(M):
metric_list = [metric_decomposition[c][j] for c in context_buckets]
contextual_datasets.append(
ContextualDataset(
datasets=[
datasets[outcomes.index(metric_i)] for metric_i in metric_list
],
parameter_decomposition=parameter_decomposition,
metric_decomposition=metric_decomposition,
)
)
remaining_metrics = list(set(remaining_metrics) - set(metric_list))
else:
logger.info(
"No metric decomposition found in experiment properties. Using "
"LCEA model to fit each outcome independently."
)
if len(remaining_metrics) > 0:
for metric_i in remaining_metrics:
contextual_datasets.append(
ContextualDataset(
datasets=[datasets[outcomes.index(metric_i)]],
parameter_decomposition=parameter_decomposition,
)
)
return contextual_datasets
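# Example (illustrative): with parameter_decomposition={"c1": ["p1_c1"],
# "c2": ["p1_c2"]} and metric_decomposition={"c1": ["m1_c1"], "c2": ["m1_c2"]},
# the per-context datasets for ("m1_c1", "m1_c2") are combined into a single
# ContextualDataset; without a metric decomposition, each outcome gets its own
# ContextualDataset and is modeled independently.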