Bayesian Exploration with NaNs¶
As violations of constraints can lead to invalid values of the objective, the evaluate function may simply return NaNs. We demonstrate below how we can still perform Bayesian Exploration in that case.
In [1]:
Copied!
# set values if testing
import os
import pandas as pd
import torch
from copy import deepcopy
from xopt import Xopt, Evaluator
from xopt.generators.bayesian import BayesianExplorationGenerator
from xopt.resources.test_functions.tnk import evaluate_TNK, tnk_vocs
from gest_api.vocs import GreaterThanConstraint, LessThanConstraint

# Ignore all warnings
import warnings

warnings.filterwarnings("ignore")

# SMOKE_TEST is set in CI to shrink the run; it is None for a full run
SMOKE_TEST = os.environ.get("SMOKE_TEST")
NUM_MC_SAMPLES = 1 if SMOKE_TEST else 128  # Monte Carlo samples for the acquisition function
NUM_RESTARTS = 1 if SMOKE_TEST else 20  # restarts for the numerical optimizer

# copy the TNK test-problem VOCS so we can modify it without touching the shared original
vocs = deepcopy(tnk_vocs)
# replace the optimization objective: explore the "y1" output instead
vocs.objectives = {"y1": "EXPLORE"}
# set values if testing
import os
import pandas as pd
import torch
from copy import deepcopy
from xopt import Xopt, Evaluator
from xopt.generators.bayesian import BayesianExplorationGenerator
from xopt.resources.test_functions.tnk import evaluate_TNK, tnk_vocs
from gest_api.vocs import GreaterThanConstraint, LessThanConstraint

# Ignore all warnings
import warnings

warnings.filterwarnings("ignore")

# SMOKE_TEST is set in CI to shrink the run; it is None for a full run
SMOKE_TEST = os.environ.get("SMOKE_TEST")
NUM_MC_SAMPLES = 1 if SMOKE_TEST else 128  # Monte Carlo samples for the acquisition function
NUM_RESTARTS = 1 if SMOKE_TEST else 20  # restarts for the numerical optimizer

# copy the TNK test-problem VOCS so we can modify it without touching the shared original
vocs = deepcopy(tnk_vocs)
# replace the optimization objective: explore the "y1" output instead
vocs.objectives = {"y1": "EXPLORE"}
/home/runner/work/Xopt/Xopt/.venv/lib/python3.12/site-packages/pyro/ops/stats.py:527: SyntaxWarning: invalid escape sequence '\g'
we have :math:`ES^{*}(P,Q) \ge ES^{*}(Q,Q)` with equality holding if and only if :math:`P=Q`, i.e.
In [2]:
Copied!
# modify the evaluate function to return NaNs if constraints are violated
def evaluate(input_dict):
    """Evaluate the TNK test problem, masking the objective with NaN on
    constraint violation.

    Parameters
    ----------
    input_dict : dict
        A single input point (keys "x1", "x2" plus the constant "a" —
        see the VOCS above).

    Returns
    -------
    dict
        The ``evaluate_TNK`` outputs with "y2" removed; "y1" is set to NaN
        whenever any constraint in ``vocs.constraints`` is violated.
    """
    output_dict = evaluate_TNK(input_dict)
    # keep a single objective output only
    del output_dict["y2"]

    for name, constraint in vocs.constraints.items():
        measured = output_dict[name]
        # a GreaterThan constraint is violated when the measured value is at
        # or below its threshold; a LessThan constraint when at or above it
        violates_greater_than = (
            isinstance(constraint, GreaterThanConstraint)
            and measured <= constraint.value
        )
        violates_less_than = (
            isinstance(constraint, LessThanConstraint)
            and measured >= constraint.value
        )
        if violates_greater_than or violates_less_than:
            output_dict["y1"] = torch.nan

    return output_dict
# modify the evaluate function to return NaNs if constraints are violated
def evaluate(input_dict):
    """Evaluate the TNK test problem, masking the objective with NaN on
    constraint violation.

    Parameters
    ----------
    input_dict : dict
        A single input point (keys "x1", "x2" plus the constant "a" —
        see the VOCS above).

    Returns
    -------
    dict
        The ``evaluate_TNK`` outputs with "y2" removed; "y1" is set to NaN
        whenever any constraint in ``vocs.constraints`` is violated.
    """
    output_dict = evaluate_TNK(input_dict)
    # keep a single objective output only
    del output_dict["y2"]

    for name, constraint in vocs.constraints.items():
        measured = output_dict[name]
        # a GreaterThan constraint is violated when the measured value is at
        # or below its threshold; a LessThan constraint when at or above it
        violates_greater_than = (
            isinstance(constraint, GreaterThanConstraint)
            and measured <= constraint.value
        )
        violates_less_than = (
            isinstance(constraint, LessThanConstraint)
            and measured >= constraint.value
        )
        if violates_greater_than or violates_less_than:
            output_dict["y1"] = torch.nan

    return output_dict
In [3]:
Copied!
# build the exploration generator from the modified VOCS
generator = BayesianExplorationGenerator(vocs=vocs)
# limit how far consecutive candidates can move in each of the two input
# variables (presumably a fraction of each variable's range — see generator docs)
generator.max_travel_distances = [0.25, 0.25]
generator.n_monte_carlo_samples = NUM_MC_SAMPLES
generator.numerical_optimizer.n_restarts = NUM_RESTARTS

# wrap the NaN-returning evaluate function and assemble the Xopt object
evaluator = Evaluator(function=evaluate)
X = Xopt(generator=generator, evaluator=evaluator)
# display the full Xopt configuration
X
# build the exploration generator from the modified VOCS
generator = BayesianExplorationGenerator(vocs=vocs)
# limit how far consecutive candidates can move in each of the two input
# variables (presumably a fraction of each variable's range — see generator docs)
generator.max_travel_distances = [0.25, 0.25]
generator.n_monte_carlo_samples = NUM_MC_SAMPLES
generator.numerical_optimizer.n_restarts = NUM_RESTARTS

# wrap the NaN-returning evaluate function and assemble the Xopt object
evaluator = Evaluator(function=evaluate)
X = Xopt(generator=generator, evaluator=evaluator)
# display the full Xopt configuration
X
Out[3]:
Xopt
________________________________
Version: 0.1.dev1+gca942dd06
Data size: 0
Config as YAML:
dump_file: null
evaluator:
function: __main__.evaluate
function_kwargs: {}
max_workers: 1
vectorized: false
generator:
computation_time: null
custom_objective: null
fixed_features: null
gp_constructor:
covar_modules: {}
custom_noise_prior: null
mean_modules: {}
name: standard
train_config: null
train_kwargs: null
train_method: lbfgs
train_model: true
trainable_mean_keys: []
transform_inputs: true
use_cached_hyperparameters: false
use_low_noise_prior: false
max_travel_distances:
- 0.25
- 0.25
model: null
n_candidates: 1
n_interpolate_points: null
n_monte_carlo_samples: 128
name: bayesian_exploration
numerical_optimizer:
max_iter: 2000
max_time: 5.0
n_restarts: 20
name: LBFGS
returns_id: false
supports_batch_generation: true
supports_constraints: true
supports_multi_objective: true
supports_single_objective: true
turbo_controller: null
use_cuda: false
vocs:
constants:
a:
dtype: null
type: Constant
value: dummy_constant
constraints:
c1:
dtype: null
type: GreaterThanConstraint
value: 0.0
c2:
dtype: null
type: LessThanConstraint
value: 0.5
objectives:
y1:
dtype: null
type: ExploreObjective
observables: {}
variables:
x1:
default_value: null
domain:
- 0.0
- 3.14159
dtype: null
type: ContinuousVariable
x2:
default_value: null
domain:
- 0.0
- 3.14159
dtype: null
type: ContinuousVariable
serialize_inline: false
serialize_torch: false
stopping_condition: null
strict: true
Run exploration¶
We start by evaluating 2 points that we know satisfy the constraints. We then run 30 exploration steps.
In [4]:
Copied!
# seed the model with two initial points known to satisfy both constraints
X.evaluate_data(pd.DataFrame({"x1": [1.0, 0.75], "x2": [0.7, 0.95]}))
X.evaluate_data(pd.DataFrame({"x1": [1.0, 0.75], "x2": [0.7, 0.95]}))
Out[4]:
| x1 | x2 | a | y1 | c1 | c2 | xopt_runtime | xopt_error | |
|---|---|---|---|---|---|---|---|---|
| 0 | 1.00 | 0.70 | dummy_constant | 1.00 | 0.584045 | 0.290 | 0.002593 | False |
| 1 | 0.75 | 0.95 | dummy_constant | 0.75 | 0.494833 | 0.265 | 0.000144 | False |
In [5]:
Copied!
# run the exploration loop; a single step when smoke testing
N_STEPS = 1 if SMOKE_TEST else 30
for i in range(N_STEPS):
    print(f"step {i}")
    X.step()

# run the exploration loop; a single step when smoke testing
N_STEPS = 1 if SMOKE_TEST else 30
for i in range(N_STEPS):
    print(f"step {i}")
    X.step()
step 0
step 1
step 2
step 3
step 4
step 5
step 6
step 7
step 8
step 9
step 10
step 11
step 12
step 13
step 14
step 15
step 16
step 17
step 18
step 19
step 20
step 21
step 22
step 23
step 24
step 25
step 26
step 27
step 28
step 29
In [6]:
Copied!
# view the collected data; "y1" is NaN wherever a constraint was violated
X.data
# view the collected data; "y1" is NaN wherever a constraint was violated
X.data
Out[6]:
| x1 | x2 | a | y1 | c1 | c2 | xopt_runtime | xopt_error | |
|---|---|---|---|---|---|---|---|---|
| 0 | 1.000000 | 0.700000 | dummy_constant | 1.000000 | 0.584045 | 0.290000 | 0.002593 | False |
| 1 | 0.750000 | 0.950000 | dummy_constant | 0.750000 | 0.494833 | 0.265000 | 0.000144 | False |
| 2 | 0.000000 | 1.735397 | dummy_constant | NaN | 1.911604 | 1.776207 | 0.004596 | False |
| 3 | 0.663353 | 1.058575 | dummy_constant | 0.663353 | 0.649854 | 0.338690 | 0.004417 | False |
| 4 | 1.006497 | 1.535983 | dummy_constant | NaN | 2.471253 | 1.329800 | 0.008819 | False |
| 5 | 1.612694 | 0.984256 | dummy_constant | NaN | 2.648709 | 1.472591 | 0.002675 | False |
| 6 | 0.827296 | 0.385012 | dummy_constant | NaN | -0.244725 | 0.120345 | 0.000170 | False |
| 7 | 0.358040 | 0.984898 | dummy_constant | 0.358040 | 0.022013 | 0.255279 | 0.000169 | False |
| 8 | 1.013053 | 0.200759 | dummy_constant | 1.013053 | 0.166575 | 0.352768 | 0.000166 | False |
| 9 | 1.176503 | 0.476586 | dummy_constant | 1.176503 | 0.512073 | 0.458204 | 0.000167 | False |
| 10 | 0.840300 | 0.000000 | dummy_constant | NaN | -0.393895 | 0.365804 | 0.000165 | False |
| 11 | 1.188028 | 0.514192 | dummy_constant | 1.188028 | 0.578971 | 0.473584 | 0.000167 | False |
| 12 | 0.504238 | 0.969011 | dummy_constant | 0.504238 | 0.175621 | 0.219990 | 0.000170 | False |
| 13 | 0.095290 | 1.030575 | dummy_constant | 0.095290 | 0.061621 | 0.445301 | 0.000167 | False |
| 14 | 0.284031 | 1.129254 | dummy_constant | 0.284031 | 0.425489 | 0.442603 | 0.000173 | False |
| 15 | 1.000088 | 0.934860 | dummy_constant | 1.000088 | 0.788326 | 0.439192 | 0.000164 | False |
| 16 | 1.095606 | 0.165936 | dummy_constant | 1.095606 | 0.301965 | 0.466346 | 0.000165 | False |
| 17 | 1.881004 | 0.000000 | dummy_constant | NaN | 2.438175 | 2.157172 | 0.000164 | False |
| 18 | 1.196125 | 0.537824 | dummy_constant | 1.196125 | 0.631162 | 0.486020 | 0.000164 | False |
| 19 | 0.410727 | 0.982412 | dummy_constant | 0.410727 | 0.033970 | 0.240691 | 0.000184 | False |
| 20 | 0.045319 | 1.028828 | dummy_constant | NaN | -0.015662 | 0.486394 | 0.000163 | False |
| 21 | 0.054663 | 1.040623 | dummy_constant | 0.054663 | 0.019115 | 0.490598 | 0.000166 | False |
| 22 | 0.672815 | 1.162888 | dummy_constant | 0.672815 | 0.856254 | 0.469286 | 0.000165 | False |
| 23 | 1.016347 | 0.960334 | dummy_constant | 1.016347 | 0.865300 | 0.478522 | 0.000165 | False |
| 24 | 0.234387 | 1.139353 | dummy_constant | 0.234387 | 0.452515 | 0.479323 | 0.000171 | False |
| 25 | 1.015534 | 0.968339 | dummy_constant | 1.015534 | 0.876144 | 0.485117 | 0.000167 | False |
| 26 | 0.230137 | 0.182941 | dummy_constant | NaN | -0.888890 | 0.173353 | 0.000169 | False |
| 27 | 0.000000 | 0.000000 | dummy_constant | NaN | -1.100000 | 0.500000 | 0.000166 | False |
| 28 | 0.746910 | 0.785397 | dummy_constant | 0.746910 | 0.082687 | 0.142416 | 0.000169 | False |
| 29 | 1.034880 | 0.061852 | dummy_constant | 1.034880 | 0.017053 | 0.478070 | 0.000166 | False |
| 30 | 1.196963 | 0.445347 | dummy_constant | 1.196963 | 0.547632 | 0.488744 | 0.000167 | False |
| 31 | 0.585893 | 1.190184 | dummy_constant | 0.585893 | 0.708837 | 0.483732 | 0.000165 | False |
In [7]:
Copied!
# plot the sampled points in input space (x1 vs x2); equal aspect keeps the
# geometry of the feasible region undistorted
ax = X.data.plot("x1", "x2")
ax.set_aspect("equal")
# plot the sampled points in input space (x1 vs x2); equal aspect keeps the
# geometry of the feasible region undistorted
ax = X.data.plot("x1", "x2")
ax.set_aspect("equal")