Skip to content

Bayesian Utilities

compute_hypervolume_and_pf(X, Y, reference_point)

Compute the hypervolume and pareto front given a set of points assuming maximization.

Parameters:

Name Type Description Default
X Tensor

The input points.

required
Y Tensor

The objective values of the points.

required
reference_point Tensor

The reference point for hypervolume calculation.

required

Returns:

Name Type Description
pareto_front_X Tensor

The points on the Pareto front. Returns None if no pareto front exists.

pareto_front_Y Tensor

The objective values of the points on the Pareto front. Returns None if no pareto front exists.

pareto_mask Tensor

A boolean mask indicating which points are on the Pareto front. Returns None if no pareto front exists.

hv_value float

The hypervolume value.

Source code in xopt/generators/bayesian/utils.py
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
def compute_hypervolume_and_pf(
    X: torch.Tensor,
    Y: torch.Tensor,
    reference_point: torch.Tensor,
) -> tuple[torch.Tensor | None, torch.Tensor | None, torch.Tensor | None, float]:
    """
    Compute the hypervolume and pareto front
    given a set of points assuming maximization.

    Parameters
    ----------
    X : torch.Tensor
        The input points; expected 2-D, one row per point.
    Y : torch.Tensor
        The objective values of the points; expected 2-D, one row per point,
        one column per objective (same objective count as ``reference_point``).
    reference_point : torch.Tensor
        The reference point for hypervolume calculation.

    Returns
    -------
    pareto_front_X : torch.Tensor
        The points on the Pareto front. Returns None if no pareto front exists.
    pareto_front_Y : torch.Tensor
        The objective values of the points on the Pareto front. Returns None if no pareto front exists.
    pareto_mask : torch.Tensor
        A boolean mask indicating which points are on the Pareto front.
        Returns None if no pareto front exists.
        NOTE(review): the mask is computed over the *augmented* set (the
        reference point is prepended to Y below), so it has length
        ``Y.shape[0] + 1`` and entry ``i + 1`` corresponds to row ``i`` of the
        original input — confirm callers account for this offset.
    hv_value : float
        The hypervolume value.
    """

    hv = Hypervolume(reference_point)
    # no observed points -> no front and zero hypervolume
    if Y.shape[0] == 0:
        return None, None, None, 0.0

    # add the reference point to the objective values
    # add a dummy (all-zero) point to the X values so X and Y rows stay aligned
    # NOTE(review): torch.zeros allocates on the default device — assumes X is
    # on the default (CPU) device, otherwise vstack would fail; confirm.
    X = torch.vstack((torch.zeros(1, X.shape[1], dtype=X.dtype), X))
    Y = torch.vstack((reference_point.unsqueeze(0), Y))

    pareto_mask = is_non_dominated(Y)

    # if the first point (the reference point itself) is in the pareto front
    # then none of the points dominate over the reference
    if pareto_mask[0]:
        return None, None, None, 0.0

    # get pareto front points (the dummy row is excluded here because
    # pareto_mask[0] is False on this path)
    pareto_front_X = X[pareto_mask]
    pareto_front_Y = Y[pareto_mask]
    # hypervolume computation is performed on CPU
    hv_value = hv.compute(Y[pareto_mask].cpu())

    return pareto_front_X, pareto_front_Y, pareto_mask, hv_value

get_training_data(input_names, outcome_name, data)

Creates training data from input data frame.

Parameters:

Name Type Description Default
input_names List[str]

List of input feature names.

required
outcome_name str

Name of the outcome variable.

required
data DataFrame

DataFrame containing input and outcome data.

required

Returns:

Type Description
tuple[Tensor, Tensor, Tensor]

Tuple containing training input tensor (train_X), training outcome tensor (train_Y), and training outcome variance tensor (train_Yvar).

Notes

The function handles NaN values, removing rows with NaN values in any of the input variables.

If the DataFrame contains a column named <outcome_name>_var, the function returns a tensor for the outcome variance (train_Yvar); otherwise, train_Yvar is None.

Source code in xopt/generators/bayesian/utils.py
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
def get_training_data(
    input_names: List[str], outcome_name: str, data: pd.DataFrame
) -> (torch.Tensor, torch.Tensor):
    """
    Creates training data from input data frame.

    Parameters
    ----------
    input_names : List[str]
        List of input feature names.

    outcome_name : str
        Name of the outcome variable.

    data : pd.DataFrame
        DataFrame containing input and outcome data.

    Returns
    -------
    tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        Tuple containing training input tensor (train_X), training outcome tensor (
        train_Y), and training outcome variance tensor (train_Yvar).

    Notes
    -----

    The function handles NaN values, removing rows with NaN values in any of the
    input variables.

    If the DataFrame contains a column named `<outcome_name>_var`, the function
    returns a tensor for the outcome variance (train_Yvar); otherwise, train_Yvar is
    None.

    """

    input_data = data[input_names]
    outcome_data = data[outcome_name]

    # cannot use any rows where any variable values are nans
    non_nans = ~input_data.isnull().T.any()
    input_data = input_data[non_nans]
    outcome_data = outcome_data[non_nans]

    train_X = torch.tensor(
        input_data[~outcome_data.isnull()].to_numpy(dtype="double").copy()
    )
    train_Y = torch.tensor(
        outcome_data[~outcome_data.isnull()].to_numpy(dtype="double").copy()
    ).unsqueeze(-1)

    train_Yvar = None
    if f"{outcome_name}_var" in data:
        variance_data = data[f"{outcome_name}_var"][non_nans]
        train_Yvar = torch.tensor(
            variance_data[~outcome_data.isnull()].to_numpy(dtype="double").copy()
        ).unsqueeze(-1)

    return train_X, train_Y, train_Yvar

get_training_data_batched(input_names, outcome_names, data, batch_mode=False)

Get data for multiple outcomes. Valid points have no NaNs for all inputs and outcomes.

Parameters:

Name Type Description Default
batch_mode bool

If false, not unrolled - will be done by SingleTaskGP. If true, unrolls the data so that each outcome is treated as a separate task in a batch mode model.

False

Returns:

Type Description
tuple[Tensor, Tensor, Tensor]

train_X: n x d or m x n x d; train_Y: n x m or m x n x 1; train_Yvar: n x m or m x n x 1

Source code in xopt/generators/bayesian/utils.py
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
def get_training_data_batched(
    input_names: List[str],
    outcome_names: List[str],
    data: pd.DataFrame,
    batch_mode: bool = False,
) -> (torch.Tensor, torch.Tensor, torch.Tensor):
    """
    Get data for multiple outcomes. Valid points have no NaNs for all inputs and outcomes.

    Parameters
    ----------
    batch_mode: bool
        If false, not unrolled - will be done by SingleTaskGP. If true, unrolls the data
        so that each outcome is treated as a separate task in a batch mode model.

    Returns
    -------
    tuple[torch.Tensor, torch.Tensor, torch.Tensor]
        train_X `n x d` or `m x n x d`
        train_Y `n x m` or `m x n x 1`
        train_Yvar `n x m` or `m x n x 1`
    """
    input_data = data[input_names]
    outcome_data = data[outcome_names]

    non_nans_input = ~input_data.isnull().T.any()
    non_nans_output = ~outcome_data.isnull().T.any()
    non_nans_all = non_nans_input & non_nans_output
    input_data = input_data[non_nans_all]
    outcome_data = outcome_data[non_nans_all]

    train_X = torch.tensor(input_data.to_numpy(dtype="double"))

    train_Y = torch.tensor(outcome_data[outcome_names].to_numpy(dtype="double"))

    train_Yvar = None
    yvar_names = [f"{outcome}_var" for outcome in outcome_names]
    have_yvar = [x in data for x in yvar_names]
    if all(have_yvar):
        train_Yvar = torch.tensor(
            data.loc[non_nans_all, yvar_names].to_numpy(dtype="double")
        )
    elif not any(have_yvar):
        # no var
        pass
    else:
        # partial - not allowed
        raise ValueError("either all or none of the outcomes must have variance data")

    if batch_mode:
        train_X, train_Y, train_Yvar = multioutput_to_batch_mode_transform(
            train_X, train_Y, len(outcome_names), train_Yvar
        )
        train_Y = train_Y.unsqueeze(-1)
        if train_Yvar is not None:
            train_Yvar = train_Yvar.unsqueeze(-1)
    return train_X, train_Y, train_Yvar

interpolate_points(df, num_points=10)

Generates interpolated points between two points specified by a pandas DataFrame.

Parameters:

Name Type Description Default
df

with two rows representing the start and end points.

required
num_points

Number of points to generate between the start and end points.

10

Returns:

Name Type Description
result DataFrame

DataFrame with the interpolated points.

Source code in xopt/generators/bayesian/utils.py
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
def interpolate_points(df, num_points=10):
    """
    Generates interpolated points between two points specified by a pandas DataFrame.

    Parameters
    ----------
    df: DataFrame
        with two rows representing the start and end points.
    num_points: int
        Number of points to generate between the start and end points.

    Returns
    -------
    result: DataFrame
        DataFrame with the interpolated points (the start point is excluded,
        the end point is included).
    """
    if df.shape[0] != 2:
        raise ValueError("Input DataFrame must have exactly two rows.")

    # fractions along the segment; drop the leading 0 so the start point
    # itself does not appear in the output
    fractions = np.linspace(0, 1, num_points + 1)[1:]

    start, end = df.iloc[0], df.iloc[1]

    # interpolate every column independently along the same fractions
    columns = {
        col: np.interp(fractions, [0, 1], [start[col], end[col]])
        for col in df.columns
    }
    return pd.DataFrame(columns)

rectilinear_domain_union(A, B)

Calculate the union of two rectilinear domains represented by input bounds A and B.

Parameters:

Name Type Description Default
A Tensor

Input bounds for domain A. It should have shape (2, N) where N is the number of dimensions. The first row contains the lower bounds, and the second row contains the upper bounds.

required
B Tensor

Input bounds for domain B. It should have the same shape as A.

required

Returns:

Type Description
Tensor

Output bounds representing the rectilinear domain that is the union of A and B.

Raises:

Type Description
AssertionError

If the shape of A is not (2, N) or if the shape of A and B are not the same.

Notes
  • The function assumes that the input bounds represent a rectilinear domain in N-dimensional space.
  • The output bounds represent the rectilinear domain obtained by taking the union of the input domains.
  • The lower bounds of the output domain are computed as the element-wise maximum of the lower bounds of A and B.
  • The upper bounds of the output domain are computed as the element-wise minimum of the upper bounds of A and B.

Examples:

>>> A = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
>>> B = torch.tensor([[0.5, 1.5], [2.5, 3.5]])
>>> result = rectilinear_domain_union(A, B)
>>> print(result)
tensor([[0.5, 1.0],
        [2.5, 3.0]])
Source code in xopt/generators/bayesian/utils.py
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
def rectilinear_domain_union(A: torch.Tensor, B: torch.Tensor) -> torch.Tensor:
    """
    Calculate the overlap (element-wise intersection) of two rectilinear
    domains represented by input bounds A and B.

    NOTE: despite the function name, the returned bounds describe the region
    contained in *both* domains — the lower bounds are the element-wise
    maximum and the upper bounds the element-wise minimum of the inputs.

    Parameters
    ----------
    A : torch.Tensor
        Input bounds for domain A. It should have shape (2, N) where N is the number
        of dimensions. The first row contains the lower bounds, and the second row
        contains the upper bounds.

    B : torch.Tensor
        Input bounds for domain B. It should have the same shape as A.

    Returns
    -------
    torch.Tensor
        Output bounds of shape (2, N) for the overlapping rectilinear domain.
        If the domains do not overlap in some dimension, the resulting lower
        bound exceeds the upper bound in that dimension (no check is performed).

    Raises
    ------
    AssertionError
        If the shape of A is not (2, N) or if the shape of A and B are not the same.

    Examples
    --------
    >>> A = torch.tensor([[0.0, 1.0], [2.0, 3.0]])
    >>> B = torch.tensor([[0.5, 1.5], [2.5, 3.5]])
    >>> rectilinear_domain_union(A, B)
    tensor([[0.5000, 1.5000],
            [2.0000, 3.0000]])
    """
    assert A.shape[0] == 2, "A should have shape (2, N)"
    assert A.shape == B.shape, (
        "Shapes of A and B should be the same, current shapes "
        f"are {A.shape} and {B.shape}"
    )

    # clone preserves A's dtype/device for the result
    out_bounds = torch.clone(A)

    # lower bounds: element-wise maximum; upper bounds: element-wise minimum
    out_bounds[0, :] = torch.max(A[0, :], B[0, :])
    out_bounds[1, :] = torch.min(A[1, :], B[1, :])

    return out_bounds

set_botorch_weights(vocs)

set weights to multiply xopt objectives or observables for botorch objectives

Source code in xopt/generators/bayesian/utils.py
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
def set_botorch_weights(vocs: VOCS):
    """set weights to multiply xopt objectives or observables for botorch objectives"""
    names = vocs.output_names

    # one weight per output; non-objective outputs keep a weight of zero
    weights = torch.zeros(len(names), dtype=torch.double)

    # weight position corresponds to the model index, i.e. the ordering of
    # output names
    for objective_name in vocs.objective_names:
        objective = vocs.objectives[objective_name]
        position = names.index(objective_name)
        if isinstance(objective, MinimizeObjective):
            weights[position] = -1.0
        elif isinstance(objective, (MaximizeObjective, ExploreObjective)):
            weights[position] = 1.0

    return weights

torch_compile_acqf(acq, vocs, tkwargs, backend='inductor', mode='default', verify=True)

Compile an acquisition function using torch.compile.

Parameters:

Name Type Description Default
acq AcquisitionFunction

The acquisition function to compile.

required
vocs VOCS

VOCS

required
tkwargs dict

The keyword arguments for the torch tensor.

required
backend str

The backend for torch.compile, by default "inductor".

'inductor'
mode str

The mode for torch.compile, by default "default".

'default'
verify bool

If True, do the verification vs eager mode.

True
Source code in xopt/generators/bayesian/utils.py
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
def torch_compile_acqf(
    acq: AcquisitionFunction,
    vocs: VOCS,
    tkwargs: dict,
    backend: str = "inductor",
    mode="default",
    verify: bool = True,
):
    """
    Compile an acquisition function using torch.compile.

    Parameters
    ----------
    acq : AcquisitionFunction
        The acquisition function to compile.
    vocs : VOCS
        VOCS
    tkwargs : dict
        The keyword arguments for the torch tensor.
    backend : str, optional
        The backend for torch.compile, by default "inductor".
    mode : str, optional
        The mode for torch.compile, by default "default".
    verify : bool, optional
        If True, do the verification vs eager mode.

    Returns
    -------
    The torch.compile-wrapped acquisition function.
    """
    # TODO: check if trace mode better
    # NOTE: if verify is False, you need to ensure tensors are copied before calling
    # or RuntimeError: Error: accessing tensor output of CUDAGraphs that has been overwritten by a subsequent run
    with gpytorch.settings.fast_pred_var(), gpytorch.settings.trace_mode():
        # assume that only a few shapes will happen - batch=1 and batch=nsamples
        saqcf = torch.compile(acq, backend=backend, mode=mode, dynamic=False)
        if verify:
            # evaluate both eager and compiled versions at one random point
            # and require near-exact agreement
            rand_point = random_inputs(vocs)[0]
            rand_vec = torch.stack(
                [rand_point[k] * torch.ones(1) for k in vocs.variable_names], dim=1
            )
            test_x = rand_vec
            test_x = test_x.unsqueeze(-2).to(**tkwargs)  # 1 x 1 x d
            acq_value = acq(test_x.clone().detach())
            sacq_value = saqcf(test_x.clone().detach())
            assert torch.allclose(acq_value, sacq_value, rtol=1e-10), (
                f"Compiled acquisition != original {acq_value=} {sacq_value=}"
            )
    return saqcf

torch_compile_gp_model(model, vocs, tkwargs, backend='inductor', mode='default', posterior=True, grad=False)

Compile a GPyTorch model using torch.compile, returning a compiled module

Parameters:

Name Type Description Default
model Model

The GPyTorch model to compile.

required
vocs VOCS

VOCS

required
tkwargs dict

The keyword arguments for the torch tensor.

required
backend str

The backend for torch.compile, by default "inductor".

'inductor'
mode str

The mode for torch.compile, by default "default".

'default'
posterior bool

If True, prime the model by using posterior method, otherwise call directly (this invokes gpytorch posterior).

True
grad bool

If True, use gradient context, otherwise use no gradient context.

False
Source code in xopt/generators/bayesian/utils.py
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
def torch_compile_gp_model(
    model: Model,
    vocs: VOCS,
    tkwargs: dict,
    backend: str = "inductor",
    mode="default",
    posterior=True,
    grad=False,
):
    """
    Compile a GPyTorch model using torch.compile, returning a compiled module

    Parameters
    ----------
    model : Model
        The GPyTorch model to compile.
    vocs : VOCS
        VOCS
    tkwargs : dict
        The keyword arguments for the torch tensor.
    backend : str, optional
        The backend for torch.compile, by default "inductor".
    mode : str, optional
        The mode for torch.compile, by default "default".
    posterior : bool, optional
        If True, prime the model by using posterior method, otherwise call directly (this invokes gpytorch posterior).
    grad : bool, optional
        If True, use gradient context, otherwise use no gradient context.

    Returns
    -------
    The torch.compile-wrapped model.

    Raises
    ------
    ValueError
        If `model` is a ModelListGP - compile its individual models instead.
    """
    if isinstance(model, ModelListGP):
        raise ValueError("ModelListGP is not supported - use individual models")

    # build a single random test point ordered by the vocs variable names
    rand_point = random_inputs(vocs)[0]
    rand_vec = torch.stack(
        [rand_point[k] * torch.ones(1) for k in vocs.variable_names], dim=1
    )
    test_x = rand_vec.to(**tkwargs)

    # bug fix: nullcontext must be instantiated - passing the class itself to
    # `with` raised a TypeError whenever grad=True
    gradctx = nullcontext() if grad else torch.no_grad()
    # TODO: check if gpytorch trace mode faster
    with gradctx, gpytorch.settings.fast_pred_var():
        model.eval()
        # eager-mode reference prediction, then compile and check agreement
        if posterior:
            pred = model.posterior(test_x)
        else:
            pred = model(test_x)
        traced_model = torch.compile(model, backend=backend, mode=mode, dynamic=None)
        mvn = traced_model.posterior(test_x) if posterior else traced_model(test_x)
        traced_mean, traced_var = mvn.mean, mvn.variance
        assert torch.allclose(pred.mean, traced_mean, rtol=0), (
            f"Compiled mean != original {pred.mean=} {traced_mean=}"
        )
        assert torch.allclose(pred.variance, traced_var, rtol=0), (
            f"Compiled variance != original: {pred.variance=} {traced_var=}"
        )

    return traced_model

torch_trace_acqf(acq, vocs, tkwargs)

Trace an acquisition function using torch.jit.trace.

Parameters:

Name Type Description Default
acq AcquisitionFunction

The acquisition function to trace.

required
vocs VOCS

VOCS

required
tkwargs dict

The keyword arguments for the torch tensor.

required
Source code in xopt/generators/bayesian/utils.py
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
def torch_trace_acqf(
    acq: AcquisitionFunction, vocs: VOCS, tkwargs: dict
) -> torch.jit.ScriptModule:
    """
    Trace an acquisition function using torch.jit.trace.

    Parameters
    ----------
    acq : AcquisitionFunction
        The acquisition function to trace.
    vocs : VOCS
        VOCS
    tkwargs : dict
        The keyword arguments for the torch tensor.

    Returns
    -------
    torch.jit.ScriptModule
        The traced acquisition function.
    """
    # Note that this is very fragile for when we mix q=1 and q>1 because tensors ndims changes
    # build a single random example input ordered by the vocs variable names
    rand_point = random_inputs(vocs)[0]
    rand_vec = torch.stack(
        [rand_point[k] * torch.ones(1) for k in vocs.variable_names], dim=1
    )
    test_x = rand_vec.to(**tkwargs)
    test_x = test_x.unsqueeze(-2)  # 1 x 1 x d
    with gpytorch.settings.fast_pred_var(), gpytorch.settings.trace_mode():
        # Need dummy evaluation to set caches
        acq(test_x.clone().detach())
        saqcf = torch.jit.trace(
            acq,
            example_inputs=test_x.clone().detach(),
            check_trace=True,
            check_tolerance=1e-8,
        )
    return saqcf

torch_trace_gp_model(model, vocs, tkwargs, posterior=True, grad=False, batch_size=1, verify=False)

Trace a GPyTorch model using torch.jit.trace. Note that resulting object will return mean and variance directly, NOT a multivariate normal.

Parameters:

Name Type Description Default
model Model

The GPyTorch model to compile.

required
vocs VOCS

VOCS

required
tkwargs dict

The keyword arguments for the torch tensor.

required
posterior bool

If True, prime the model by using posterior method, otherwise call directly (this invokes gpytorch posterior).

True
grad bool

If True, use gradient context, otherwise use no gradient context.

False
batch_size int

The batch size for the input tensor for tracing, by default 1.

1
verify bool

If True, request that torch verify the trace by comparing to eager mode, by default False.

False
Source code in xopt/generators/bayesian/utils.py
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
def torch_trace_gp_model(
    model: Model,
    vocs: VOCS,
    tkwargs: dict,
    posterior: bool = True,
    grad: bool = False,
    batch_size: int = 1,
    verify: bool = False,
) -> torch.jit.ScriptModule:
    """
    Trace a GPyTorch model using torch.jit.trace. Note that resulting object will return mean and variance directly,
    NOT a multivariate normal.

    Parameters
    ----------
    model : Model
        The GPyTorch model to compile.
    vocs : VOCS
        VOCS
    tkwargs : dict
        The keyword arguments for the torch tensor.
    posterior : bool, optional
        If True, prime the model by using posterior method, otherwise call directly (this invokes gpytorch posterior).
    grad : bool, optional
        If True, use gradient context, otherwise use no gradient context.
    batch_size : int, optional
        The batch size for the input tensor for tracing, by default 1.
    verify : bool, optional
        If True, request that torch verify the trace by comparing to eager mode, by default False.

    Returns
    -------
    torch.jit.ScriptModule
        The traced model; calling it returns a (mean, variance) tensor pair.

    Raises
    ------
    ValueError
        If `model` is a ModelListGP - trace its individual models instead.
    """
    if isinstance(model, ModelListGP):
        raise ValueError(
            "ModelListGP is not supported for JIT tracing - use individual models"
        )
    # build a random example batch ordered by the vocs variable names
    rand_point = random_inputs(vocs)[0]
    rand_vec = torch.stack(
        [rand_point[k] * torch.ones(batch_size) for k in vocs.variable_names], dim=1
    )
    test_x = rand_vec.to(**tkwargs)
    # test_x_1 = test_x[:1,...]

    gradctx = nullcontext() if grad else torch.no_grad()
    model.eval()
    with gradctx, gpytorch.settings.fast_pred_var(), gpytorch.settings.trace_mode():
        if posterior:
            # eager reference first, then trace through the wrapper that
            # returns (mean, variance) tensors instead of an MVN object
            pred = model.posterior(test_x)
            traced_model = torch.jit.trace(
                MeanVarModelWrapperPosterior(model), test_x, check_trace=False
            )
            traced_model = torch.jit.optimize_for_inference(traced_model)
        else:
            pred = model(test_x)
            traced_model = torch.jit.trace(
                MeanVarModelWrapper(model), test_x, check_trace=False
            )
            traced_model = torch.jit.optimize_for_inference(traced_model)
        if verify:
            # compare traced outputs to the eager-mode prediction
            traced_mean, traced_var = traced_model(test_x)
            assert torch.allclose(pred.mean, traced_mean, rtol=0), (
                f"JIT traced mean != original {pred.mean=} {traced_mean=}"
            )
            assert torch.allclose(pred.variance, traced_var, rtol=0), (
                f"JIT traced variance != original: {pred.variance=} {traced_var=}"
            )

    return traced_model.to(**tkwargs)

validate_turbo_controller_base(value, valid_controller_types, info)

Validate turbo controller input

Source code in xopt/generators/bayesian/utils.py
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
def validate_turbo_controller_base(
    value: Any,
    valid_controller_types: list[type[TurboController]],
    info: ValidationInfo,
):
    """Validate turbo controller input"""

    # map class name -> class so controllers can be looked up by string
    controller_types = {
        controller.__name__: controller for controller in valid_controller_types
    }

    vocs = info.data.get("vocs", None)
    if vocs is None:
        raise ValueError("vocs must be provided to validate turbo controller")

    if isinstance(value, str):
        # translate legacy short names into class names
        legacy_names = {
            "optimize": "OptimizeTurboController",
            "safety": "SafetyTurboController",
        }
        value = legacy_names.get(value, value)

        # instantiate the controller named by the string
        if value not in controller_types:
            raise ValueError(
                f"{value} not found, available values are {controller_types.keys()}"
            )
        value = controller_types[value](vocs=vocs)
    elif isinstance(value, dict):
        value = cast(dict[str, Any], value)
        value_copy = deepcopy(value)
        # instantiate the controller described by the dict
        if "name" not in value:
            raise ValueError("turbo input dict needs to have a `name` attribute")
        name = value_copy.pop("name")
        if name not in controller_types:
            raise ValueError(
                f"{value} not found, available values are {controller_types.keys()}"
            )
        # drop keys that are supplied by the constructor itself
        value_copy.pop("dim", None)
        value_copy.pop("vocs", None)
        value = controller_types[name](vocs=vocs, **value_copy)

    # final check: the result must be an instance of an allowed controller type
    if isinstance(value, tuple(valid_controller_types)):
        return value
    raise ValueError(
        f"Turbo controller of type {type(value)} not allowed for this generator. Valid types are {valid_controller_types}"
    )