Multi-fidelity Multi-objective Bayesian Optimization¶
Here we attempt to solve for the constrained Pareto front of the TNK multi-objective optimization problem using Multi-Fidelity Multi-Objective Bayesian Optimization. For simplicity we assume that the objective and constraint functions at lower fidelities are exactly equal to the functions at higher fidelities (this is not a requirement, although for best results lower fidelity calculations should correlate with higher fidelity ones); a minimal sketch of such an evaluation function is shown after the problem definition below. The algorithm should learn this relationship and use information gathered at lower fidelities to choose samples that improve the hypervolume of the Pareto front at the maximum fidelity.
TNK function $n=2$ variables: $x_i \in [0, \pi], i=1,2$
Objectives:
- $f_i(x) = x_i$
Constraints:
- $g_1(x) = -x_1^2 -x_2^2 + 1 + 0.1 \cos\left(16 \arctan \frac{x_1}{x_2}\right) \le 0$
- $g_2(x) = (x_1 - 1/2)^2 + (x_2-1/2)^2 \le 0.5$
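To make the simplifying assumption concrete, here is a minimal sketch of what a fidelity-aware evaluation function could look like: the fidelity input s is accepted but ignored, so every fidelity returns the exact full-fidelity objectives and constraints. The function name evaluate_tnk_multifidelity is hypothetical; the tutorial itself uses the evaluate_TNK function shipped with Xopt, whose implementation may differ.
import numpy as np


def evaluate_tnk_multifidelity(inputs: dict) -> dict:
    # hypothetical sketch, not the Xopt implementation
    x1, x2 = inputs["x1"], inputs["x2"]
    _s = inputs.get("s", 1.0)  # fidelity in [0, 1]; deliberately unused here

    # objectives
    y1, y2 = x1, x2

    # constraints, matching the VOCS below: feasible when c1 >= 0 and c2 <= 0.5
    c1 = x1**2 + x2**2 - 1.0 - 0.1 * np.cos(16 * np.arctan2(x1, x2))
    c2 = (x1 - 0.5) ** 2 + (x2 - 0.5) ** 2

    return {"y1": y1, "y2": y2, "c1": c1, "c2": c2}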
# set values if testing
import os
from copy import deepcopy
import pandas as pd
import numpy as np
import torch
from xopt import Xopt, Evaluator
from xopt.generators.bayesian import MultiFidelityGenerator
from xopt.resources.test_functions.tnk import evaluate_TNK, tnk_vocs
import matplotlib.pyplot as plt
# Ignore all warnings
import warnings
warnings.filterwarnings("ignore")
SMOKE_TEST = os.environ.get("SMOKE_TEST")
N_MC_SAMPLES = 1 if SMOKE_TEST else 128
NUM_RESTARTS = 1 if SMOKE_TEST else 20
BUDGET = 0.02 if SMOKE_TEST else 10
evaluator = Evaluator(function=evaluate_TNK)
print(tnk_vocs.dict())
{'variables': {'x1': (0.0, 3.14159), 'x2': (0.0, 3.14159)}, 'constraints': {'c1': ('GREATER_THAN', 0.0), 'c2': ('LESS_THAN', 0.5)}, 'objectives': {'y1': 'MINIMIZE', 'y2': 'MINIMIZE'}, 'constants': {'a': 'dummy_constant'}, 'observables': []}
Set up the Multi-Fidelity Multi-objective optimization algorithm¶
Here we create the Multi-Fidelity generator object, which can solve both single- and multi-objective optimization problems depending on the number of objectives in the VOCS. As an example taken from a real-life multi-fidelity simulation problem, we specify the cost as a function of the fidelity parameter $s \in [0, 1]$, $C(s) = s^{3.5}$.
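Because the cost rises steeply with fidelity, low-fidelity samples are much cheaper than full-fidelity ones. The short sanity check below is illustrative only; it just evaluates $C(s)$ at a few fidelities, assuming the total budget reported later by calculate_total_cost() is the sum of these per-sample costs.
# illustrative only: the cost model C(s) = s**3.5 used in this example
cost = lambda s: s**3.5

for s in [0.0, 0.25, 0.5, 0.75, 1.0]:
    print(f"s = {s:4.2f} -> relative cost {cost(s):.4f}")

# assuming total cost is a simple sum of per-sample costs, one full-fidelity
# sample (cost 1.0) is as expensive as 1 / cost(0.25) = 128 quarter-fidelity samples
print(f"one full-fidelity sample ~ {1 / cost(0.25):.0f} quarter-fidelity samples")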
my_vocs = deepcopy(tnk_vocs)
my_vocs.constraints = {}
generator = MultiFidelityGenerator(vocs=my_vocs, reference_point={"y1": 1.5, "y2": 1.5})
# set cost function according to approximate scaling of laser plasma accelerator
# problem, see https://journals.aps.org/prresearch/abstract/10.1103/PhysRevResearch.5.013063
generator.cost_function = lambda s: s**3.5
generator.numerical_optimizer.n_restarts = NUM_RESTARTS
generator.n_monte_carlo_samples = N_MC_SAMPLES
generator.gp_constructor.use_low_noise_prior = True
X = Xopt(generator=generator, evaluator=evaluator, vocs=my_vocs)
# evaluate at some explicit initial points
X.evaluate_data(pd.DataFrame({"x1": [1.0, 0.75], "x2": [0.75, 1.0], "s": [0.0, 0.1]}))
X
Xopt
________________________________
Version: 2.6.8.dev18+g6fb143c55.d20251203
Data size: 2
Config as YAML:
dump_file: null
evaluator:
  function: xopt.resources.test_functions.tnk.evaluate_TNK
  function_kwargs:
    raise_probability: 0
    random_sleep: 0
    sleep: 0
  max_workers: 1
  vectorized: false
generator:
  computation_time: null
  custom_objective: null
  fixed_features: null
  gp_constructor:
    covar_modules: {}
    custom_noise_prior: null
    mean_modules: {}
    name: standard
    trainable_mean_keys: []
    transform_inputs: true
    use_cached_hyperparameters: false
    use_low_noise_prior: true
  max_travel_distances: null
  model: null
  n_candidates: 1
  n_interpolate_points: null
  n_monte_carlo_samples: 128
  name: multi_fidelity
  numerical_optimizer:
    max_iter: 2000
    max_time: 5.0
    n_restarts: 20
    name: LBFGS
  reference_point:
    s: 0.0
    y1: 1.5
    y2: 1.5
  supports_batch_generation: true
  supports_constraints: true
  supports_multi_objective: true
  turbo_controller: null
  use_cuda: false
  use_pf_as_initial_points: false
max_evaluations: null
serialize_inline: false
serialize_torch: false
strict: true
vocs:
  constants:
    a: dummy_constant
  constraints: {}
  objectives:
    s: MAXIMIZE
    y1: MINIMIZE
    y2: MINIMIZE
  observables: []
  variables:
    s:
    - 0
    - 1
    x1:
    - 0.0
    - 3.14159
    x2:
    - 0.0
    - 3.14159
Run optimization routine¶
Instead of ending the optimization routine after an explicit number of samples, we end it once a given optimization budget has been exceeded. WARNING: because the budget is checked before each step, the final step can overshoot the budget by up to the cost of one more sample.
budget = BUDGET
while X.generator.calculate_total_cost() < budget:
    X.step()
    print(
        f"n_samples: {len(X.data)} "
        f"budget used: {X.generator.calculate_total_cost():.4} "
        f"hypervolume: {X.generator.get_pareto_front_and_hypervolume()[-1]:.4}"
    )
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
(traceback abridged: Xopt.step -> BayesianGenerator.propose_candidates -> botorch optimize_acqf -> NMOMF.forward -> InverseCostWeightedUtility.forward)

ValueError: Costs must be strictly positive. Consider clamping cost_objective.
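The ValueError above is raised by BoTorch's InverseCostWeightedUtility, which requires strictly positive per-sample costs, while $C(s) = s^{3.5}$ evaluates to exactly zero at $s = 0$. One possible workaround, purely an assumption on our part and not part of the original example, is to add a small positive floor to the cost function before running the loop; whether this is the preferred fix depends on the Xopt/BoTorch versions in use.
# assumption: add a small positive floor so the BoTorch cost-aware utility
# always sees strictly positive costs; MIN_COST is a hypothetical value
MIN_COST = 1e-3
generator.cost_function = lambda s: s**3.5 + MIN_COST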
Show results¶
X.data
| | x1 | x2 | s | a | y1 | y2 | c1 | c2 | xopt_runtime | xopt_error |
|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 1.00 | 0.75 | 0.0 | dummy_constant | 1.00 | 0.75 | 0.626888 | 0.3125 | 0.000161 | False |
| 1 | 0.75 | 1.00 | 0.1 | dummy_constant | 0.75 | 1.00 | 0.626888 | 0.3125 | 0.000134 | False |
Plot results¶
Here we plot the resulting observations in input space, colored by feasibility (neglecting the fact that these data points are at varying fidelities).
fig, ax = plt.subplots()
theta = np.linspace(0, np.pi / 2)
r = np.sqrt(1 + 0.1 * np.cos(16 * theta))
x_1 = r * np.sin(theta)
x_2_lower = r * np.cos(theta)
x_2_upper = (0.5 - (x_1 - 0.5) ** 2) ** 0.5 + 0.5
z = np.zeros_like(x_1)
# ax2.plot(x_1, x_2_lower,'r')
ax.fill_between(x_1, z, x_2_lower, fc="white")
circle = plt.Circle(
    (0.5, 0.5), 0.5**0.5, color="r", alpha=0.25, zorder=0, label="Valid Region"
)
ax.add_patch(circle)
history = pd.concat(
    [X.data, tnk_vocs.feasibility_data(X.data)], axis=1, ignore_index=False
)
ax.plot(*history[["x1", "x2"]][history["feasible"]].to_numpy().T, ".C1")
ax.plot(*history[["x1", "x2"]][~history["feasible"]].to_numpy().T, ".C2")
ax.set_xlim(0, 3.14)
ax.set_ylim(0, 3.14)
ax.set_xlabel("x1")
ax.set_ylabel("x2")
ax.set_aspect("equal")
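Since the plot above ignores fidelity, a quick variant (illustrative only) colors each observation by its fidelity value instead:
# illustrative only: color each observed point by its fidelity value s
fig, ax = plt.subplots()
sc = ax.scatter(history["x1"], history["x2"], c=history["s"], cmap="viridis")
fig.colorbar(sc, label="fidelity s")
ax.set_xlabel("x1")
ax.set_ylabel("x2")
ax.set_aspect("equal")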
Plot path through input space¶
ax = history.hist(["x1", "x2", "s"], bins=20)
history.plot(y=["x1", "x2", "s"])
<Axes: >
Plot the acquisition function¶
Here we plot the acquisition function at a small set of fidelities, $s \in \{0, 0.5, 1.0\}$.
# plot the acquisition function
bounds = X.generator.vocs.bounds
model = X.generator.model

# create mesh over non-fidelity parameters
n = 50
x = torch.linspace(*bounds.T[1], n)
y = torch.linspace(*bounds.T[2], n)
xx, yy = torch.meshgrid(x, y)

# plot function(s) at a single fidelity parameter
fidelities = [0.0, 0.5, 1.0]
for fidelity in fidelities:
    pts = torch.hstack([ele.reshape(-1, 1) for ele in (xx, yy)]).double()
    pts = torch.cat((torch.ones(pts.shape[0], 1) * fidelity, pts), dim=-1)

    acq_func = X.generator.get_acquisition(model)

    with torch.no_grad():
        acq_pts = pts.unsqueeze(1)
        acq = acq_func(acq_pts)

    fig, ax = plt.subplots()

    xxn, yyn = xx.numpy(), yy.numpy()
    c = ax.pcolor(xxn, yyn, acq.reshape(n, n), cmap="Blues")
    fig.colorbar(c)
    ax.set_title(f"Acquisition function - s: {fidelity}")

    ax.plot(*history[["x1", "x2"]][history["feasible"]].to_numpy().T, ".C1")
    ax.plot(*history[["x1", "x2"]][~history["feasible"]].to_numpy().T, ".C2")
    ax.plot(*history[["x1", "x2"]].to_numpy()[-1].T, "+")

    candidate = pd.DataFrame(X.generator.generate(1), index=[0])
    print(candidate[["x1", "x2"]].to_numpy())
    ax.plot(*candidate[["x1", "x2"]].to_numpy()[0], "o")
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
(traceback abridged: same call chain as above, ending in InverseCostWeightedUtility.forward)

ValueError: Costs must be strictly positive. Consider clamping cost_objective.
# examine lengthscale of the first objective
list(model.models[0].named_parameters())
[('likelihood.noise_covar.raw_noise',
Parameter containing:
tensor([-91.2884], dtype=torch.float64, requires_grad=True)),
('mean_module.raw_constant',
Parameter containing:
tensor(1.2374e-14, dtype=torch.float64, requires_grad=True)),
('covar_module.raw_lengthscale',
Parameter containing:
tensor([[0.3215, 0.0602, 0.3349]], dtype=torch.float64, requires_grad=True))]
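The raw_lengthscale entries above are unconstrained parameters. To inspect the actual lengthscales after the kernel's constraint transform is applied (one value each for s, x1, and x2), one can read the kernel's lengthscale property; this is a minimal sketch assuming the standard GPyTorch kernel interface.
# read the constrained lengthscales corresponding to raw_lengthscale above
with torch.no_grad():
    print(model.models[0].covar_module.lengthscale)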