Multi-objective Bayesian Optimization
TNK test problem with $n=2$ variables: $x_i \in [0, \pi],\ i=1,2$
Objectives (both minimized):
- $f_i(x) = x_i,\ i=1,2$
Constraints (an illustrative evaluation sketch follows this list):
- $g_1(x) = -x_1^2 - x_2^2 + 1 + 0.1 \cos\left(16 \arctan \frac{x_1}{x_2}\right) \le 0$
- $g_2(x) = (x_1 - 1/2)^2 + (x_2 - 1/2)^2 \le 0.5$
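The notebook uses the packaged `evaluate_TNK` function and `tnk_vocs` object shipped with Xopt. For orientation only, a minimal re-implementation of the evaluation is sketched below; the output names (`y1`, `y2`, `c1`, `c2`) mirror the VOCS printed in the first cell, and the packaged function may differ in details (for example, it also passes through the dummy constant `a`).

```python
import numpy as np


def evaluate_tnk_sketch(inputs: dict) -> dict:
    """Illustrative TNK evaluation; the notebook itself uses xopt's evaluate_TNK."""
    x1, x2 = inputs["x1"], inputs["x2"]
    return {
        "y1": x1,  # objective 1: minimize x1
        "y2": x2,  # objective 2: minimize x2
        # c1 >= 0 is equivalent to g1(x) <= 0 above
        "c1": x1**2 + x2**2 - 1.0 - 0.1 * np.cos(16 * np.arctan2(x1, x2)),
        # c2 <= 0.5 is g2(x) written directly
        "c2": (x1 - 0.5) ** 2 + (x2 - 0.5) ** 2,
    }
```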
In [1]:
import os
import warnings

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

from xopt import Xopt, Evaluator
from xopt.generators.bayesian import MOBOGenerator
from xopt.resources.test_functions.tnk import evaluate_TNK, tnk_vocs

# Ignore all warnings
warnings.filterwarnings("ignore")

# set values if testing
SMOKE_TEST = os.environ.get("SMOKE_TEST")
N_MC_SAMPLES = 1 if SMOKE_TEST else 128
NUM_RESTARTS = 1 if SMOKE_TEST else 20
N_STEPS = 1 if SMOKE_TEST else 30
MAX_ITER = 1 if SMOKE_TEST else 200

evaluator = Evaluator(function=evaluate_TNK)

print(tnk_vocs.dict())
{'variables': {'x1': [0.0, 3.14159], 'x2': [0.0, 3.14159]}, 'constraints': {'c1': ['GREATER_THAN', 0.0], 'c2': ['LESS_THAN', 0.5]}, 'objectives': {'y1': 'MINIMIZE', 'y2': 'MINIMIZE'}, 'constants': {'a': 'dummy_constant'}, 'observables': []}
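For reference, the printed `tnk_vocs` could be built by hand with the `VOCS` class; a minimal sketch with the values copied from the printout above:

```python
from xopt import VOCS

# hand-built equivalent of tnk_vocs (values taken from the printed dict)
vocs = VOCS(
    variables={"x1": [0.0, 3.14159], "x2": [0.0, 3.14159]},
    objectives={"y1": "MINIMIZE", "y2": "MINIMIZE"},
    constraints={"c1": ["GREATER_THAN", 0.0], "c2": ["LESS_THAN", 0.5]},
    constants={"a": "dummy_constant"},
)
```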
In [2]:
# construct the MOBO generator; the reference point bounds the hypervolume
# calculation in objective space
generator = MOBOGenerator(vocs=tnk_vocs, reference_point={"y1": 1.5, "y2": 1.5})
generator.n_monte_carlo_samples = N_MC_SAMPLES
generator.numerical_optimizer.n_restarts = NUM_RESTARTS
generator.numerical_optimizer.max_iter = MAX_ITER
generator.gp_constructor.use_low_noise_prior = True

X = Xopt(generator=generator, evaluator=evaluator, vocs=tnk_vocs)

# seed the optimizer with two initial evaluations, then run N_STEPS of MOBO
X.evaluate_data(pd.DataFrame({"x1": [1.0, 0.75], "x2": [0.75, 1.0]}))

for i in range(N_STEPS):
    print(i)
    X.step()
0
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
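Each `X.step()` call asks the generator for a new candidate input and evaluates it with the evaluator. A rough manual equivalent of a single step is sketched below, assuming the generator's `generate(n)` method returns a list of input dictionaries, as in the standard Xopt generator interface:

```python
# sketch of one optimization step (X.step() wraps equivalent behavior)
candidates = X.generator.generate(1)        # list containing one proposed input dict
X.evaluate_data(pd.DataFrame(candidates))   # evaluate and append the result to X.data
```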
In [3]:
X.generator.data
Out[3]:
  | x1 | x2 | a | y1 | y2 | c1 | c2 | xopt_runtime | xopt_error |
---|---|---|---|---|---|---|---|---|---|
0 | 1.000000 | 0.750000 | dummy_constant | 1.000000 | 0.750000 | 0.626888 | 0.312500 | 0.000231 | False |
1 | 0.750000 | 1.000000 | dummy_constant | 0.750000 | 1.000000 | 0.626888 | 0.312500 | 0.000137 | False |
2 | 0.338271 | 1.660682 | dummy_constant | 0.338271 | 1.660682 | 1.972021 | 1.373338 | 0.000153 | False |
3 | 0.205007 | 0.571904 | dummy_constant | 0.205007 | 0.571904 | -0.702266 | 0.092191 | 0.000150 | False |
4 | 0.000000 | 0.000000 | dummy_constant | 0.000000 | 0.000000 | -1.100000 | 0.500000 | 0.000152 | False |
5 | 0.000000 | 0.903686 | dummy_constant | 0.000000 | 0.903686 | -0.283351 | 0.412963 | 0.000154 | False |
6 | 0.278085 | 0.979258 | dummy_constant | 0.278085 | 0.979258 | 0.064424 | 0.278934 | 0.000150 | False |
7 | 3.085734 | 0.000000 | dummy_constant | 3.085734 | 0.000000 | 8.421757 | 6.936022 | 0.000153 | False |
8 | 0.944973 | 0.140977 | dummy_constant | 0.944973 | 0.140977 | -0.015507 | 0.326898 | 0.000152 | False |
9 | 0.985903 | 0.061507 | dummy_constant | 0.985903 | 0.061507 | -0.078503 | 0.428378 | 0.000149 | False |
10 | 1.022044 | 0.104556 | dummy_constant | 1.022044 | 0.104556 | 0.061535 | 0.428906 | 0.000151 | False |
11 | 0.085747 | 1.044210 | dummy_constant | 0.085747 | 1.044210 | 0.072031 | 0.467770 | 0.000153 | False |
12 | 0.785835 | 0.643724 | dummy_constant | 0.785835 | 0.643724 | 0.033371 | 0.102358 | 0.000152 | False |
13 | 1.010642 | 0.032004 | dummy_constant | 1.010642 | 0.032004 | -0.065023 | 0.479775 | 0.000152 | False |
14 | 0.585311 | 0.825508 | dummy_constant | 0.585311 | 0.825508 | 0.114381 | 0.113233 | 0.000106 | False |
15 | 1.039659 | 0.068549 | dummy_constant | 1.039659 | 0.068549 | 0.036130 | 0.477382 | 0.000150 | False |
16 | 0.046488 | 1.031537 | dummy_constant | 0.046488 | 1.031537 | -0.008913 | 0.488205 | 0.000156 | False |
17 | 0.898019 | 0.466369 | dummy_constant | 0.898019 | 0.466369 | 0.005041 | 0.159550 | 0.000154 | False |
18 | 0.423012 | 0.894659 | dummy_constant | 0.423012 | 0.894659 | -0.091491 | 0.161683 | 0.000154 | False |
19 | 0.971871 | 0.329689 | dummy_constant | 0.971871 | 0.329689 | 0.003507 | 0.251669 | 0.000153 | False |
20 | 1.023677 | 0.034798 | dummy_constant | 1.023677 | 0.034798 | -0.036456 | 0.490650 | 0.000154 | False |
21 | 0.030619 | 0.000000 | dummy_constant | 0.030619 | 0.000000 | -1.099063 | 0.470319 | 0.000156 | False |
22 | 0.857606 | 0.555516 | dummy_constant | 0.857606 | 0.555516 | 0.141497 | 0.130964 | 0.000156 | False |
23 | 0.800518 | 0.592865 | dummy_constant | 0.800518 | 0.592865 | 0.063789 | 0.098935 | 0.000153 | False |
24 | 0.393268 | 0.927788 | dummy_constant | 0.393268 | 0.927788 | -0.083685 | 0.194394 | 0.000116 | False |
25 | 1.034970 | 0.043078 | dummy_constant | 1.034970 | 0.043078 | -0.005638 | 0.494970 | 0.000154 | False |
26 | 0.000000 | 0.101538 | dummy_constant | 0.000000 | 0.101538 | -1.089690 | 0.408772 | 0.000156 | False |
27 | 0.500129 | 0.884016 | dummy_constant | 0.500129 | 0.884016 | 0.069043 | 0.147468 | 0.000154 | False |
28 | 0.015160 | 1.018749 | dummy_constant | 0.015160 | 1.018749 | -0.059100 | 0.504171 | 0.000153 | False |
29 | 1.002915 | 0.004609 | dummy_constant | 1.002915 | 0.004609 | -0.093871 | 0.498336 | 0.000148 | False |
30 | 0.033497 | 1.027672 | dummy_constant | 0.033497 | 1.027672 | -0.029484 | 0.496062 | 0.000143 | False |
31 | 0.697331 | 0.717353 | dummy_constant | 0.697331 | 0.717353 | -0.096582 | 0.086182 | 0.000154 | False |
Plot results
In [4]:
fig, ax = plt.subplots()

# trace the g1 = 0 boundary in polar form: r^2 = 1 + 0.1 cos(16 theta)
theta = np.linspace(0, np.pi / 2)
r = np.sqrt(1 + 0.1 * np.cos(16 * theta))
x_1 = r * np.sin(theta)
x_2_lower = r * np.cos(theta)
x_2_upper = (0.5 - (x_1 - 0.5) ** 2) ** 0.5 + 0.5  # not used below
z = np.zeros_like(x_1)

# ax2.plot(x_1, x_2_lower,'r')
ax.fill_between(x_1, z, x_2_lower, fc="white")

# g2 constraint region: circle of radius sqrt(0.5) centered at (0.5, 0.5)
circle = plt.Circle(
    (0.5, 0.5), 0.5**0.5, color="r", alpha=0.25, zorder=0, label="Valid Region"
)
ax.add_patch(circle)

# mark evaluated points, colored by feasibility
history = pd.concat(
    [X.data, tnk_vocs.feasibility_data(X.data)], axis=1, ignore_index=False
)
ax.plot(*history[["x1", "x2"]][history["feasible"]].to_numpy().T, ".C1")
ax.plot(*history[["x1", "x2"]][~history["feasible"]].to_numpy().T, ".C2")

ax.set_xlim(0, 3.14)
ax.set_ylim(0, 3.14)
ax.set_xlabel("x1")
ax.set_ylabel("x2")
ax.set_aspect("equal")
Plot path through input space
In [5]:
ax = history.plot("x1", "x2")
ax.set_ylim(0, 3.14)
ax.set_xlim(0, 3.14)
ax.set_aspect("equal")
In [6]:
## visualize model
X.generator.visualize_model(show_feasibility=True)
Out[6]:
(<Figure size 800x1980 with 22 Axes>, array([[<Axes: title={'center': 'Posterior Mean [y1]'}, ylabel='x2'>, <Axes: title={'center': 'Posterior SD [y1]'}>], [<Axes: title={'center': 'Posterior Mean [y2]'}, ylabel='x2'>, <Axes: title={'center': 'Posterior SD [y2]'}>], [<Axes: title={'center': 'Posterior Mean [c1]'}, ylabel='x2'>, <Axes: title={'center': 'Posterior SD [c1]'}>], [<Axes: title={'center': 'Posterior Mean [c2]'}, ylabel='x2'>, <Axes: title={'center': 'Posterior SD [c2]'}>], [<Axes: title={'center': 'Acq. Function'}, ylabel='x2'>, <Axes: >], [<Axes: title={'center': 'Feasibility'}, xlabel='x1', ylabel='x2'>, <Axes: xlabel='x1'>]], dtype=object))
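`visualize_model` returns a Matplotlib figure and an array of axes (as the output above shows), so the plot can be adjusted or saved like any other figure; a minimal sketch (the filename is arbitrary):

```python
# keep the returned handles to post-process or save the model visualization
fig, axes = X.generator.visualize_model(show_feasibility=True)
fig.suptitle("MOBO model of the TNK problem")
fig.savefig("tnk_mobo_model.png", dpi=150)
```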
In [7]:
X.generator.update_pareto_front_history()
X.generator.pareto_front_history.plot(y="hypervolume", label="Hypervolume")
Out[7]:
<Axes: >
In [8]:
X.generator.pareto_front_history
Out[8]:
  | iteration | hypervolume | n_non_dominated |
---|---|---|---|
0 | 0 | 0.375000 | 1 |
1 | 1 | 0.500000 | 2 |
2 | 2 | 0.500000 | 2 |
3 | 3 | 0.500000 | 2 |
4 | 4 | 0.500000 | 2 |
5 | 5 | 0.500000 | 2 |
6 | 6 | 0.750932 | 2 |
7 | 7 | 0.750932 | 2 |
8 | 8 | 0.750932 | 2 |
9 | 9 | 0.750932 | 2 |
10 | 10 | 1.059426 | 3 |
11 | 11 | 1.147092 | 4 |
12 | 12 | 1.221294 | 4 |
13 | 13 | 1.221294 | 4 |
14 | 14 | 1.252125 | 5 |
15 | 15 | 1.268700 | 6 |
16 | 16 | 1.268700 | 6 |
17 | 17 | 1.290696 | 7 |
18 | 18 | 1.290696 | 7 |
19 | 19 | 1.297554 | 8 |
20 | 20 | 1.297554 | 8 |
21 | 21 | 1.297554 | 8 |
22 | 22 | 1.301119 | 9 |
23 | 23 | 1.304022 | 10 |
24 | 24 | 1.304022 | 10 |
25 | 25 | 1.304022 | 10 |
26 | 26 | 1.304022 | 10 |
27 | 27 | 1.312135 | 11 |
28 | 28 | 1.312135 | 11 |
29 | 29 | 1.312135 | 11 |
30 | 30 | 1.312135 | 11 |
31 | 31 | 1.312135 | 11 |
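The `hypervolume` column is the objective-space volume dominated by the current Pareto front, bounded by the reference point $(1.5, 1.5)$. The first two table entries can be verified by hand from the two seed evaluations $(y_1, y_2) = (1.0, 0.75)$ and $(0.75, 1.0)$; a small numpy check:

```python
import numpy as np

ref = np.array([1.5, 1.5])
p1, p2 = np.array([1.0, 0.75]), np.array([0.75, 1.0])

# iteration 0: only p1 observed -> box between p1 and the reference point
hv0 = np.prod(ref - p1)  # 0.5 * 0.75 = 0.375

# iteration 1: union of the two boxes via inclusion-exclusion
overlap = np.prod(ref - np.maximum(p1, p2))  # 0.5 * 0.5 = 0.25
hv1 = np.prod(ref - p1) + np.prod(ref - p2) - overlap  # 0.375 + 0.375 - 0.25 = 0.5

print(hv0, hv1)  # matches the 0.375000 and 0.500000 entries above
```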
In [9]:
X.data
Out[9]:
  | x1 | x2 | a | y1 | y2 | c1 | c2 | xopt_runtime | xopt_error |
---|---|---|---|---|---|---|---|---|---|
0 | 1.000000 | 0.750000 | dummy_constant | 1.000000 | 0.750000 | 0.626888 | 0.312500 | 0.000231 | False |
1 | 0.750000 | 1.000000 | dummy_constant | 0.750000 | 1.000000 | 0.626888 | 0.312500 | 0.000137 | False |
2 | 0.338271 | 1.660682 | dummy_constant | 0.338271 | 1.660682 | 1.972021 | 1.373338 | 0.000153 | False |
3 | 0.205007 | 0.571904 | dummy_constant | 0.205007 | 0.571904 | -0.702266 | 0.092191 | 0.000150 | False |
4 | 0.000000 | 0.000000 | dummy_constant | 0.000000 | 0.000000 | -1.100000 | 0.500000 | 0.000152 | False |
5 | 0.000000 | 0.903686 | dummy_constant | 0.000000 | 0.903686 | -0.283351 | 0.412963 | 0.000154 | False |
6 | 0.278085 | 0.979258 | dummy_constant | 0.278085 | 0.979258 | 0.064424 | 0.278934 | 0.000150 | False |
7 | 3.085734 | 0.000000 | dummy_constant | 3.085734 | 0.000000 | 8.421757 | 6.936022 | 0.000153 | False |
8 | 0.944973 | 0.140977 | dummy_constant | 0.944973 | 0.140977 | -0.015507 | 0.326898 | 0.000152 | False |
9 | 0.985903 | 0.061507 | dummy_constant | 0.985903 | 0.061507 | -0.078503 | 0.428378 | 0.000149 | False |
10 | 1.022044 | 0.104556 | dummy_constant | 1.022044 | 0.104556 | 0.061535 | 0.428906 | 0.000151 | False |
11 | 0.085747 | 1.044210 | dummy_constant | 0.085747 | 1.044210 | 0.072031 | 0.467770 | 0.000153 | False |
12 | 0.785835 | 0.643724 | dummy_constant | 0.785835 | 0.643724 | 0.033371 | 0.102358 | 0.000152 | False |
13 | 1.010642 | 0.032004 | dummy_constant | 1.010642 | 0.032004 | -0.065023 | 0.479775 | 0.000152 | False |
14 | 0.585311 | 0.825508 | dummy_constant | 0.585311 | 0.825508 | 0.114381 | 0.113233 | 0.000106 | False |
15 | 1.039659 | 0.068549 | dummy_constant | 1.039659 | 0.068549 | 0.036130 | 0.477382 | 0.000150 | False |
16 | 0.046488 | 1.031537 | dummy_constant | 0.046488 | 1.031537 | -0.008913 | 0.488205 | 0.000156 | False |
17 | 0.898019 | 0.466369 | dummy_constant | 0.898019 | 0.466369 | 0.005041 | 0.159550 | 0.000154 | False |
18 | 0.423012 | 0.894659 | dummy_constant | 0.423012 | 0.894659 | -0.091491 | 0.161683 | 0.000154 | False |
19 | 0.971871 | 0.329689 | dummy_constant | 0.971871 | 0.329689 | 0.003507 | 0.251669 | 0.000153 | False |
20 | 1.023677 | 0.034798 | dummy_constant | 1.023677 | 0.034798 | -0.036456 | 0.490650 | 0.000154 | False |
21 | 0.030619 | 0.000000 | dummy_constant | 0.030619 | 0.000000 | -1.099063 | 0.470319 | 0.000156 | False |
22 | 0.857606 | 0.555516 | dummy_constant | 0.857606 | 0.555516 | 0.141497 | 0.130964 | 0.000156 | False |
23 | 0.800518 | 0.592865 | dummy_constant | 0.800518 | 0.592865 | 0.063789 | 0.098935 | 0.000153 | False |
24 | 0.393268 | 0.927788 | dummy_constant | 0.393268 | 0.927788 | -0.083685 | 0.194394 | 0.000116 | False |
25 | 1.034970 | 0.043078 | dummy_constant | 1.034970 | 0.043078 | -0.005638 | 0.494970 | 0.000154 | False |
26 | 0.000000 | 0.101538 | dummy_constant | 0.000000 | 0.101538 | -1.089690 | 0.408772 | 0.000156 | False |
27 | 0.500129 | 0.884016 | dummy_constant | 0.500129 | 0.884016 | 0.069043 | 0.147468 | 0.000154 | False |
28 | 0.015160 | 1.018749 | dummy_constant | 0.015160 | 1.018749 | -0.059100 | 0.504171 | 0.000153 | False |
29 | 1.002915 | 0.004609 | dummy_constant | 1.002915 | 0.004609 | -0.093871 | 0.498336 | 0.000148 | False |
30 | 0.033497 | 1.027672 | dummy_constant | 0.033497 | 1.027672 | -0.029484 | 0.496062 | 0.000143 | False |
31 | 0.697331 | 0.717353 | dummy_constant | 0.697331 | 0.717353 | -0.096582 | 0.086182 | 0.000154 | False |
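To pull the feasible, non-dominated points out of the raw data directly, `tnk_vocs.feasibility_data` (already used in the plotting cell above) can be combined with a simple dominance filter; a minimal sketch, independent of the generator's own Pareto-front bookkeeping:

```python
# extract feasible, non-dominated observations from X.data (illustrative only)
feasible = tnk_vocs.feasibility_data(X.data)["feasible"]
objectives = X.data.loc[feasible, ["y1", "y2"]].to_numpy()


def non_dominated_mask(points: np.ndarray) -> np.ndarray:
    """True for points not dominated by any other point (both objectives minimized)."""
    mask = np.ones(len(points), dtype=bool)
    for i in range(len(points)):
        dominated_by = np.all(points <= points[i], axis=1) & np.any(points < points[i], axis=1)
        mask[i] = not dominated_by.any()
    return mask


pareto_points = objectives[non_dominated_mask(objectives)]
print(pareto_points)
```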