Multi-Objective Bayesian Optimization

MOBOGenerator

Bases: MultiObjectiveBayesianGenerator

Implements Multi-Objective Bayesian Optimization using the Log Expected Hypervolume Improvement acquisition function.

Attributes:

| Name | Type | Description |
|------|------|-------------|
| `use_pf_as_initial_points` | `bool` | Flag to specify if Pareto front points are to be used during optimization of the acquisition function. |

Methods:

| Name | Description |
|------|-------------|
| `get_acquisition` | Get the acquisition function for Bayesian Optimization. |
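
A minimal usage sketch: the two-objective test function, variable names, and reference point values below are illustrative assumptions, not from the source.

```python
from xopt import Evaluator, VOCS, Xopt
from xopt.generators.bayesian.mobo import MOBOGenerator

# hypothetical two-objective toy problem
vocs = VOCS(
    variables={"x1": [0.0, 1.0], "x2": [0.0, 1.0]},
    objectives={"f1": "MINIMIZE", "f2": "MINIMIZE"},
)

def evaluate(inputs: dict) -> dict:
    # toy objectives (assumption, for illustration only)
    return {"f1": inputs["x1"] ** 2, "f2": (inputs["x2"] - 1.0) ** 2}

generator = MOBOGenerator(vocs=vocs, reference_point={"f1": 10.0, "f2": 10.0})
X = Xopt(generator=generator, evaluator=Evaluator(function=evaluate), vocs=vocs)

X.random_evaluate(4)  # seed the generator with random initial data
for _ in range(5):
    X.step()  # train the model, optimize the acquisition, evaluate a candidate
```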

Source code in xopt/generators/bayesian/mobo.py
class MOBOGenerator(MultiObjectiveBayesianGenerator):
    """
    Implements Multi-Objective Bayesian Optimization using the Log Expected
    Hypervolume Improvement acquisition function.

    Attributes
    ----------
    use_pf_as_initial_points : bool
        Flag to specify if Pareto front points are to be used during optimization
        of the acquisition function.

    Methods
    -------
    get_acquisition(self, model: torch.nn.Module) -> FixedFeatureAcquisitionFunction
        Get the acquisition function for Bayesian Optimization.
    """

    name = "mobo"
    supports_batch_generation: bool = True
    supports_constraints: bool = True
    use_pf_as_initial_points: bool = Field(
        False,
        description="flag to specify if pareto front points are to be used during "
        "optimization of the acquisition function",
    )
    __doc__ = """Implements Multi-Objective Bayesian Optimization using the Log Expected
            Hypervolume Improvement acquisition function"""

    _compatible_turbo_controllers = [SafetyTurboController]

    @field_validator("custom_objective", mode="before")
    @classmethod
    def validate_custom_objective(cls, value):
        if value is not None:
            raise ValueError("custom objectives are not supported for MOBOGenerator")
        return value

    def _get_objective(self) -> MCMultiOutputObjective:
        """
        Create the multi-objective Bayesian optimization objective.
        """
        objective = create_mobo_objective(self.vocs)

        return objective.to(**self.tkwargs)

    def get_acquisition(self, model: torch.nn.Module):
        """
        Get the acquisition function for Bayesian Optimization.
        Note that this needs to overwrite the base method due to
        how qLogExpectedHypervolumeImprovement handles constraints.

        Parameters
        ----------
        model : torch.nn.Module
            The model used for Bayesian Optimization.

        Returns
        -------
        FixedFeatureAcquisitionFunction
            The acquisition function.
        """
        if model is None:
            raise ValueError("model cannot be None")

        # get base acquisition function
        acq = self._get_acquisition(model)

        # apply fixed features if specified in the generator
        acq = self._apply_fixed_features(acq)

        acq = acq.to(**self.tkwargs)
        return acq

    def _get_acquisition(
        self, model: torch.nn.Module
    ) -> qLogNoisyExpectedHypervolumeImprovement:
        """
        Create the Log Expected Hypervolume Improvement acquisition function.

        Parameters
        ----------
        model : torch.nn.Module
            The model used for Bayesian Optimization.

        Returns
        -------
        qLogNoisyExpectedHypervolumeImprovement
            The Log Expected Hypervolume Improvement acquisition function.
        """
        inputs = self.get_input_data(self.data)
        sampler = self._get_sampler(model)

        acq = qLogNoisyExpectedHypervolumeImprovement(
            model,
            X_baseline=inputs,
            constraints=self._get_constraint_callables(),
            ref_point=self.torch_reference_point,
            sampler=sampler,
            objective=self._get_objective(),
            cache_root=False,
            prune_baseline=True,
        )
        return acq

    def _get_initial_conditions(self, n_candidates: int = 1) -> Optional[Tensor]:
        """
        Generate initial candidates for optimizing the acquisition function based on
        the Pareto front.

        If `use_pf_as_initial_points` flag is set to true then the current
        Pareto-optimal set is used as initial points for optimizing the acquisition
        function instead of randomly selected points (random points fill in the set
        if `num_restarts` is greater than the number of points in the Pareto set).

        Parameters
        ----------
        n_candidates : int, optional
            The number of candidates to generate, by default 1.

        Returns
        -------
        Optional[Tensor]
            A `num_restarts x q x d` tensor of initial conditions, or None if the
            Pareto front is not used.
        """
        if self.use_pf_as_initial_points:
            if isinstance(self.numerical_optimizer, LBFGSOptimizer):
                bounds = self._get_optimization_bounds()
                num_restarts = self.numerical_optimizer.n_restarts

                pf_locations, _, _, _ = self.get_pareto_front_and_hypervolume()

                # if there is no pareto front just return None to revert back to
                # default behavior
                if pf_locations is None:
                    return None

                initial_points = torch.clone(pf_locations)

                # add the q dimension
                initial_points = initial_points.unsqueeze(1)
                initial_points = initial_points.expand([-1, n_candidates, -1])

                # initial_points must equal the number of restarts
                if len(initial_points) < num_restarts:
                    # add random points to the list inside the bounds
                    sobol_samples = draw_sobol_samples(
                        bounds, num_restarts - len(initial_points), n_candidates
                    )

                    initial_points = torch.cat([initial_points, sobol_samples])
                elif len(initial_points) > num_restarts:
                    # if there are too many select the first `num_restarts` points at
                    # random
                    initial_points = initial_points[
                        torch.randperm(len(initial_points))
                    ][:num_restarts]

                return initial_points
            else:
                raise RuntimeWarning(
                    "cannot use PF as initial optimization points "
                    "for non-LBFGS optimizers, ignoring flag"
                )

        return None
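
A sketch of enabling Pareto-front seeding, assuming the default LBFGS numerical optimizer (per the source above, a `RuntimeWarning` is raised for other optimizers); the `vocs` and reference point are the hypothetical ones from the first sketch.

```python
generator = MOBOGenerator(
    vocs=vocs,  # hypothetical VOCS from the sketch above
    reference_point={"f1": 10.0, "f2": 10.0},
    use_pf_as_initial_points=True,  # seed acquisition optimization with the current PF
)
```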

model_input_names property

The variable names corresponding to the trained model.

__init__(**kwargs)

Initialize the generator.

Source code in xopt/generator.py
def __init__(self, **kwargs):
    """
    Initialize the generator.
    """
    super().__init__(**kwargs)
    logger.info(f"Initialized generator {self.name}")

add_data(new_data)

Add new data to the generator for Bayesian Optimization.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `new_data` | `DataFrame` | The new data to be added to the generator. | *required* |
Notes

This method appends the new data to the existing data in the generator.

Source code in xopt/generators/bayesian/bayesian_generator.py
def add_data(self, new_data: pd.DataFrame):
    """
    Add new data to the generator for Bayesian Optimization.

    Parameters
    ----------
    new_data : pd.DataFrame
        The new data to be added to the generator.

    Notes
    -----
    This method appends the new data to the existing data in the generator.
    """
    self.data = pd.concat([self.data, new_data], axis=0, ignore_index=True)
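
A short sketch of adding externally evaluated data by hand; the column names follow the hypothetical VOCS above and the values are illustrative.

```python
import pandas as pd

new_data = pd.DataFrame(
    {
        "x1": [0.1, 0.4],
        "x2": [0.2, 0.8],
        "f1": [0.01, 0.16],
        "f2": [0.64, 0.04],
    }
)
generator.add_data(new_data)  # appended to generator.data
```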

generate(n_candidates)

Generate candidates using Bayesian Optimization.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `n_candidates` | `int` | The number of candidates to generate in each optimization step. | *required* |

Returns:

| Type | Description |
|------|-------------|
| `List[Dict]` | A list of dictionaries containing the generated candidates. |

Raises:

| Type | Description |
|------|-------------|
| `NotImplementedError` | If the number of candidates is greater than 1 and the generator does not support batch candidate generation. |
| `RuntimeError` | If the generator contains no data; call `add_data` to add data before generating candidates. |

Notes

This method generates candidates for Bayesian Optimization based on the provided number of candidates. It updates the internal model with the current data and calculates the candidates by optimizing the acquisition function. The method returns the generated candidates in the form of a list of dictionaries.

Source code in xopt/generators/bayesian/bayesian_generator.py
def generate(self, n_candidates: int):
    """
    Generate candidates using Bayesian Optimization.

    Parameters
    ----------
    n_candidates : int
        The number of candidates to generate in each optimization step.

    Returns
    -------
    List[Dict]
        A list of dictionaries containing the generated candidates.

    Raises
    ------
    NotImplementedError
        If the number of candidates is greater than 1, and the generator does not
        support batch candidate generation.

    RuntimeError
        If no data is contained in the generator, the 'add_data' method should be
        called to add data before generating candidates.

    Notes
    -----
    This method generates candidates for Bayesian Optimization based on the
    provided number of candidates. It updates the internal model with the current
    data and calculates the candidates by optimizing the acquisition function.
    The method returns the generated candidates in the form of a list of dictionaries.
    """

    self.n_candidates = n_candidates
    if n_candidates > 1 and not self.supports_batch_generation:
        raise NotImplementedError(
            "This Bayesian algorithm does not currently support parallel candidate "
            "generation"
        )

    # if no data exists raise error
    if self.data is None:
        raise RuntimeError(
            "no data contained in generator, call `add_data` "
            "method to add data, see also `Xopt.random_evaluate()`"
        )

    else:
        # dict to track runtimes
        timing_results = {}

        # update internal model with internal data
        start_time = time.perf_counter()
        model = self.train_model(self.get_training_data(self.data))
        timing_results["training"] = time.perf_counter() - start_time

        # propose candidates given model
        start_time = time.perf_counter()
        candidates = self.propose_candidates(model, n_candidates=n_candidates)
        timing_results["acquisition_optimization"] = (
            time.perf_counter() - start_time
        )

        # post process candidates
        result = self._process_candidates(candidates)

        # append timing results to dataframe (if it exists)
        if self.computation_time is not None:
            self.computation_time = pd.concat(
                (
                    self.computation_time,
                    pd.DataFrame(timing_results, index=[0]),
                ),
                ignore_index=True,
            )
        else:
            self.computation_time = pd.DataFrame(timing_results, index=[0])

        if self.n_interpolate_points is not None:
            if self.n_candidates > 1:
                raise RuntimeError(
                    "cannot generate interpolated points for "
                    "multiple candidate generation"
                )
            else:
                assert len(result) == 1
                result = interpolate_points(
                    pd.concat(
                        (self.data.iloc[-1:][self.vocs.variable_names], result),
                        axis=0,
                        ignore_index=True,
                    ),
                    num_points=self.n_interpolate_points,
                )

        return result.to_dict("records")
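
A sketch of driving the generator directly; since `supports_batch_generation` is true for MOBO, `n_candidates > 1` is allowed. The printed values are illustrative.

```python
candidates = generator.generate(n_candidates=2)  # list of dicts, one per candidate
print(candidates)
# e.g. [{'x1': 0.37, 'x2': 0.91}, {'x1': 0.05, 'x2': 0.88}]
```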

get_acquisition(model)

Get the acquisition function for Bayesian Optimization. Note that this needs to overwrite the base method due to how qLogExpectedHypervolumeImprovement handles constraints.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `model` | `Module` | The model used for Bayesian Optimization. | *required* |

Returns:

| Type | Description |
|------|-------------|
| `FixedFeatureAcquisitionFunction` | The acquisition function. |

Source code in xopt/generators/bayesian/mobo.py
def get_acquisition(self, model: torch.nn.Module):
    """
    Get the acquisition function for Bayesian Optimization.
    Note that this needs to overwrite the base method due to
    how qLogExpectedHypervolumeImprovement handles constraints.

    Parameters
    ----------
    model : torch.nn.Module
        The model used for Bayesian Optimization.

    Returns
    -------
    FixedFeatureAcquisitionFunction
        The acquisition function.
    """
    if model is None:
        raise ValueError("model cannot be None")

    # get base acquisition function
    acq = self._get_acquisition(model)

    # apply fixed features if specified in the generator
    acq = self._apply_fixed_features(acq)

    acq = acq.to(**self.tkwargs)
    return acq

get_input_data(data)

Convert input data to a torch tensor.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `data` | `DataFrame` | The input data in the form of a pandas DataFrame. | *required* |

Returns:

| Type | Description |
|------|-------------|
| `Tensor` | A torch tensor containing the input data. |

Notes

This method takes a pandas DataFrame as input data and converts it into a torch tensor. It specifically selects columns corresponding to the model's input names (variables), and the resulting tensor is configured with the data type and device settings from the generator.

Source code in xopt/generators/bayesian/bayesian_generator.py
def get_input_data(self, data: pd.DataFrame) -> torch.Tensor:
    """
    Convert input data to a torch tensor.

    Parameters
    ----------
    data : pd.DataFrame
        The input data in the form of a pandas DataFrame.

    Returns
    -------
    torch.Tensor
        A torch tensor containing the input data.

    Notes
    -----
    This method takes a pandas DataFrame as input data and converts it into a
    torch tensor. It specifically selects columns corresponding to the model's
    input names (variables), and the resulting tensor is configured with the data
    type and device settings from the generator.
    """
    return torch.tensor(
        data[self.model_input_names].to_numpy().copy(), **self.tkwargs
    )
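
A minimal sketch, reusing the hypothetical generator from above:

```python
inputs = generator.get_input_data(generator.data)
print(inputs.shape)  # (n_samples, n_variables); dtype/device from generator.tkwargs
```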

get_optimum()

Select the best point(s) given by the model using the posterior mean.

Source code in xopt/generators/bayesian/bayesian_generator.py
def get_optimum(self):
    """select the best point(s) given by the
    model using the Posterior mean"""
    acq = qUpperConfidenceBound(
        model=self.model, beta=0.0, objective=self._get_objective()
    )
    if len(self.vocs.constraints):
        acq = ConstrainedMCAcquisitionFunction(
            self.model,
            acq,
            self._get_constraint_callables(),
            sampler=self._get_sampler(self.model),
        )
    bounds = self._get_bounds()

    if self.fixed_features is not None:
        acq = self._apply_fixed_features(acq)

        indices = []
        for idx, name in enumerate(self.vocs.variable_names):
            if name not in self.fixed_features:
                indices += [idx]

        bounds = bounds[:, indices]

    bounds = bounds.to(**self.tkwargs)
    acq = acq.to(**self.tkwargs)

    # use default initial conditions for a global search
    result = self.numerical_optimizer.optimize(acq, bounds, 1)

    return self._process_candidates(result)
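
A one-line sketch; note that for a multi-objective problem this "optimum" is defined through the generator's internal objective and the posterior mean.

```python
optimum = generator.get_optimum()  # processed candidate(s) in variable space
```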

get_pareto_front_and_hypervolume()

Get the pareto front and hypervolume of the current data.

Returns:

| Name | Type | Description |
|------|------|-------------|
| `pareto_front_variables` | `Tensor` | The pareto front variable data. |
| `pareto_front_objectives` | `Tensor` | The pareto front objective data. |
| `pareto_mask` | `Tensor` | A mask indicating which points are part of the pareto front. |
| `hv` | `float` | The hypervolume of the pareto front. |

Source code in xopt/generators/bayesian/bayesian_generator.py
def get_pareto_front_and_hypervolume(
    self,
) -> tuple[torch.Tensor | None, torch.Tensor | None, torch.Tensor | None, float]:
    """
    Get the pareto front and hypervolume of the current data.

    Returns
    -------
    pareto_front_variables : torch.Tensor
        The pareto front variable data.
    pareto_front_objectives : torch.Tensor
        The pareto front objective data.
    pareto_mask : torch.Tensor
        A mask indicating which points are part of the pareto front.
    hv : float
        The hypervolume of the pareto front.
    """

    # get scaled data
    # note that the objective data is scaled by +/- 1
    # based on maximization / minimization
    variable_data, objective_data, weights = self._get_scaled_data(data=self.data)

    # if there are no valid points skip PF calculation and return None
    if len(variable_data) == 0:
        return None, None, None, 0.0

    pareto_front_variables, pareto_front_objectives, pareto_mask, hv = (
        compute_hypervolume_and_pf(
            variable_data,
            objective_data,
            self.torch_reference_point,
        )
    )

    # scale the pareto front objectives back to original space
    if pareto_front_objectives is not None:
        pareto_front_objectives = pareto_front_objectives / weights

    return (
        pareto_front_variables,
        pareto_front_objectives,
        pareto_mask,
        hv,
    )
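
A short sketch of inspecting the current front; the printed summary is illustrative.

```python
pf_vars, pf_objs, mask, hv = generator.get_pareto_front_and_hypervolume()
if pf_vars is None:
    print("no valid points yet")
else:
    print(f"{len(pf_vars)} non-dominated points, hypervolume = {hv:.3f}")
```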

get_training_data(data)

Get training data used to train the GP model.

If a turbo controller is specified with the flag `restrict_model_data`, this returns the subset of data that lies inside the trust region.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `data` | `DataFrame` | The data in the form of a pandas DataFrame. | *required* |

Returns:

| Name | Type | Description |
|------|------|-------------|
| `data` | `DataFrame` | A subset of the data used to train the model, in the form of a pandas DataFrame. |

Source code in xopt/generators/bayesian/bayesian_generator.py
def get_training_data(self, data: pd.DataFrame) -> pd.DataFrame:
    """
    Get training data used to train the GP model.

    If a turbo controller is specified with the flag `restrict_model_data` this
    will return a subset of data that is inside the trust region.

    Parameters
    ----------
    data : pd.DataFrame
        The data in the form of a pandas DataFrame.

    Returns
    -------
    data : pd.DataFrame
        A subset of the data used to train the model, in the form of a pandas DataFrame.

    """
    if self.turbo_controller is not None:
        if self.turbo_controller.restrict_model_data:
            data = self.turbo_controller.get_data_in_trust_region(data, self)
            if data.empty:
                raise FeasibilityError(
                    "No training data available to build model, because ",
                    "no points in the dataset are within the TuRBO trust region. ",
                )
    return data

model_dump(*args, **kwargs)

Overwrite `model_dump` to remove faux class attributes.

Source code in xopt/generator.py
def model_dump(self, *args: Any, **kwargs: Any) -> dict[str, Any]:
    """overwrite model dump to remove faux class attrs"""

    res = super().model_dump(*args, **kwargs)

    res.pop("supports_batch_generation", None)
    res.pop("supports_multi_objective", None)

    return res

propose_candidates(model, n_candidates=1)

Propose candidates using Bayesian Optimization.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `model` | `Module` | The trained Bayesian model. | *required* |
| `n_candidates` | `int` | The number of candidates to propose (default is 1). | `1` |

Returns:

| Type | Description |
|------|-------------|
| `Tensor` | A tensor containing the proposed candidates. |

Notes

This method proposes candidates for Bayesian Optimization by numerically optimizing the acquisition function using the trained model. It updates the state of the Turbo controller if used and calculates the optimization bounds.

Source code in xopt/generators/bayesian/bayesian_generator.py
def propose_candidates(self, model: Module, n_candidates: int = 1) -> Tensor:
    """
    Propose candidates using Bayesian Optimization.

    Parameters
    ----------
    model : Module
        The trained Bayesian model.
    n_candidates : int, optional
        The number of candidates to propose (default is 1).

    Returns
    -------
    Tensor
        A tensor containing the proposed candidates.

    Notes
    -----
    This method proposes candidates for Bayesian Optimization by numerically
    optimizing the acquisition function using the trained model. It updates the
    state of the Turbo controller if used and calculates the optimization bounds.
    """
    # update TurBO state if used with the last `n_candidates` points
    if self.turbo_controller is not None:
        self.turbo_controller.update_state(self, n_candidates)

    # calculate optimization bounds
    bounds = self._get_optimization_bounds()

    # get acquisition function
    acq_funct = self.get_acquisition(model)

    # get initial candidates to start acquisition function optimization
    initial_points = self._get_initial_conditions(n_candidates)

    # get candidates -- grid optimizer does not support batch_initial_conditions
    if isinstance(self.numerical_optimizer, GridOptimizer):
        candidates = self.numerical_optimizer.optimize(
            acq_funct, bounds, n_candidates
        )
    else:
        candidates = self.numerical_optimizer.optimize(
            acq_funct, bounds, n_candidates, batch_initial_conditions=initial_points
        )
    return candidates
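
A short sketch combining `train_model` and `propose_candidates`, assuming the generator already holds data:

```python
model = generator.train_model()
candidates = generator.propose_candidates(model, n_candidates=2)
print(candidates.shape)  # typically (n_candidates, n_variables), in input space
```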

train_model(data=None, update_internal=True)

Train a Bayesian model for Bayesian Optimization.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `data` | `DataFrame` | The data to be used for training the model. If not provided, the internal data of the generator is used. | `None` |
| `update_internal` | `bool` | Flag to indicate whether to update the internal model of the generator with the trained model (default is True). | `True` |

Returns:

| Type | Description |
|------|-------------|
| `Module` | The trained Bayesian model. |

Raises:

| Type | Description |
|------|-------------|
| `ValueError` | If no data is available to build the model. |

Notes

This method trains a Bayesian model using the provided data or the internal data of the generator. It updates the internal model with the trained model if the 'update_internal' flag is set to True.

Source code in xopt/generators/bayesian/bayesian_generator.py
def train_model(
    self, data: pd.DataFrame | None = None, update_internal: bool = True
) -> Module:
    """
    Train a Bayesian model for Bayesian Optimization.

    Parameters
    ----------
    data : pd.DataFrame, optional
        The data to be used for training the model. If not provided, the internal
        data of the generator is used.
    update_internal : bool, optional
        Flag to indicate whether to update the internal model of the generator
        with the trained model (default is True).

    Returns
    -------
    Module
        The trained Bayesian model.

    Raises
    ------
    ValueError
        If no data is available to build the model.

    Notes
    -----
    This method trains a Bayesian model using the provided data or the internal
    data of the generator. It updates the internal model with the trained model
    if the 'update_internal' flag is set to True.
    """
    if data is None:
        data = self.get_training_data(self.data)
        if data is None:
            raise ValueError("no data available to build model")

    if data.empty:
        raise ValueError("no data available to build model")

    # get input bounds
    variable_bounds = {
        name: ele.domain for name, ele in self.vocs.variables.items()
    }

    # if turbo restrict points is true then set the bounds to the trust region
    # bounds
    if self.turbo_controller is not None:
        if self.turbo_controller.restrict_model_data:
            variable_bounds = dict(
                zip(
                    self.vocs.variable_names,
                    self.turbo_controller.get_trust_region(self).numpy().T,
                )
            )

    # add fixed feature bounds if requested
    if self.fixed_features is not None:
        # get bounds for each fixed_feature (vocs bounds take precedent)
        for key in self.fixed_features:
            if key not in variable_bounds:
                if key not in data:
                    raise KeyError(
                        "generator data needs to contain fixed feature "
                        f"column name `{key}`"
                    )
                f_data = data[key]
                bounds = [f_data.min(), f_data.max()]
                if bounds[1] - bounds[0] < 1e-8:
                    bounds[1] = bounds[0] + 1e-8
                variable_bounds[key] = bounds

    _model = self.gp_constructor.build_model(
        self.model_input_names,
        self.vocs.output_names,
        data,
        {name: variable_bounds[name] for name in self.model_input_names},
        **self.tkwargs,
    )

    if update_internal:
        self.model = _model

    return _model
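
A minimal sketch; `update_internal=False` trains a throwaway model without touching `generator.model`, and the returned BoTorch model is assumed to expose a `posterior` method.

```python
model = generator.train_model(update_internal=False)  # trains on generator.data
test_x = generator.get_input_data(generator.data)
print(model.posterior(test_x).mean.shape)  # one column per output name
```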

update_pareto_front_history()

Update the historical pareto front statistics in the generator.

For each row of data in `self.data`, compute the pareto front stats (hypervolume, number of non-dominated points) if no corresponding entry exists in the `self.pareto_front_history` DataFrame.

Source code in xopt/generators/bayesian/bayesian_generator.py
def update_pareto_front_history(self):
    """
    Update the historical pareto front statistics in the generator.

    For each row of data in self.data, compute the pareto front stats
    (hypervolume, number of non-dominated points) if no corresponding
    entry exists in the `self.pareto_front_history` DataFrame.
    """
    # TODO: make sure this works when manually changing the data frame
    if self.pareto_front_history is None:
        self.pareto_front_history = pd.DataFrame()

    # for each row of data, compute the cumulative pareto front stats
    for i in self.data.index:
        # check if the pareto front stats already exist
        if i in self.pareto_front_history.index:
            continue

        # get scaled data
        variable_data, objective_data, _ = self._get_scaled_data(
            data=self.data.loc[:i]
        )

        # compute the pareto front stats
        _, pareto_front_variables, _, hv = compute_hypervolume_and_pf(
            variable_data,
            objective_data,
            self.torch_reference_point,
        )

        # get the number of non-dominated points
        n_non_dominated = (
            len(pareto_front_variables) if pareto_front_variables is not None else 0
        )

        # create a new row for the pareto front stats
        new_row: dict[str, Any] = {
            "iteration": i,
            "hypervolume": hv,
            "n_non_dominated": n_non_dominated,
        }
        # add the new row to the pareto front history
        self.pareto_front_history = pd.concat(
            [
                self.pareto_front_history,
                pd.DataFrame(new_row, index=[i]),
            ],
            ignore_index=False,
        )
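
A sketch of tracking optimization progress; the column names follow the source above.

```python
generator.update_pareto_front_history()
history = generator.pareto_front_history
print(history[["iteration", "hypervolume", "n_non_dominated"]].tail())
```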

validate_turbo_controller(value, info) classmethod

Note: the default behavior is to not use TuRBO.

Source code in xopt/generators/bayesian/bayesian_generator.py
@field_validator("turbo_controller", mode="before")
@classmethod
def validate_turbo_controller(cls, value: Any, info: ValidationInfo):
    """note default behavior is no use of turbo"""
    if value is None:
        return value

    compatible_turbo_controllers = [
        turbo_controller
        for turbo_controller in cls.get_compatible_turbo_controllers()
        if turbo_controller is not None
    ]

    if len(compatible_turbo_controllers) == 0:
        raise ValueError("no turbo controllers are compatible with this generator")
    else:
        return validate_turbo_controller_base(
            value, compatible_turbo_controllers, info
        )

visualize_model(**kwargs)

Display GP model predictions for the selected output(s).

The GP models are displayed with respect to the named variables. If None are given, the list of variables in vocs is used. Feasible samples are indicated with a filled orange "o", infeasible samples with a hollow red "o". Feasibility is calculated with respect to all constraints unless the selected output is a constraint itself, in which case only that one is considered.

Parameters:

| Name | Type | Description | Default |
|------|------|-------------|---------|
| `**kwargs` | | Supported keyword arguments, listed below. | `{}` |

Supported keyword arguments:

- `output_names` : List[str]
  Outputs for which the GP models are displayed. Defaults to all outputs in vocs.
- `variable_names` : List[str]
  The variables with respect to which the GP models are displayed (maximum of 2). Defaults to vocs.variable_names.
- `idx` : int
  Index of the last sample to use. This also selects the point of reference in higher dimensions unless an explicit reference_point is given.
- `reference_point` : dict
  Reference point determining the value of variables in vocs.variable_names, but not in variable_names (slice plots in higher dimensions). Defaults to last used sample.
- `show_samples` : bool, optional
  Whether samples are shown.
- `show_prior_mean` : bool, optional
  Whether the prior mean is shown.
- `show_feasibility` : bool, optional
  Whether the feasibility region is shown.
- `show_acquisition` : bool, optional
  Whether the acquisition function is computed and shown (only if acquisition function is not None).
- `n_grid` : int, optional
  Number of grid points per dimension used to display the model predictions.
- `axes` : Axes, optional
  Axes object used for plotting.
- `exponentiate` : bool, optional
  Flag to exponentiate acquisition function before plotting.

Returns:

| Name | Type | Description |
|------|------|-------------|
| `result` | `tuple` | The matplotlib figure and axes objects. |

Source code in xopt/generators/bayesian/bayesian_generator.py
def visualize_model(self, **kwargs):
    """Display GP model predictions for the selected output(s).

    The GP models are displayed with respect to the named variables. If None are given, the list of variables in
    vocs is used. Feasible samples are indicated with a filled orange "o", infeasible samples with a hollow
    red "o". Feasibility is calculated with respect to all constraints unless the selected output is a
    constraint itself, in which case only that one is considered.

    Parameters
    ----------
    **kwargs: dict, optional
        Supported keyword arguments:
        - output_names : List[str]
            Outputs for which the GP models are displayed. Defaults to all outputs in vocs.
        - variable_names : List[str]
            The variables with respect to which the GP models are displayed (maximum of 2).
            Defaults to vocs.variable_names.
        - idx : int
            Index of the last sample to use. This also selects the point of reference in
            higher dimensions unless an explicit reference_point is given.
        - reference_point : dict
            Reference point determining the value of variables in vocs.variable_names, but not in variable_names
            (slice plots in higher dimensions). Defaults to last used sample.
        - show_samples : bool, optional
            Whether samples are shown.
        - show_prior_mean : bool, optional
            Whether the prior mean is shown.
        - show_feasibility : bool, optional
            Whether the feasibility region is shown.
        - show_acquisition : bool, optional
            Whether the acquisition function is computed and shown (only if acquisition function is not None).
        - n_grid : int, optional
            Number of grid points per dimension used to display the model predictions.
        - axes : Axes, optional
            Axes object used for plotting.
        - exponentiate : bool, optional
            Flag to exponentiate acquisition function before plotting.

    Returns
    -------
    result : tuple
        The matplotlib figure and axes objects.
    """
    return visualize_generator_model(self, **kwargs)
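
A short sketch using keyword names from the docstring above (requires matplotlib; the variable names are from the hypothetical VOCS):

```python
fig, ax = generator.visualize_model(
    variable_names=["x1", "x2"],  # at most two variables per figure
    show_acquisition=True,
    n_grid=50,
)
fig.savefig("mobo_model.png")
```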

yaml(**kwargs)

Serialize first, then dump to a YAML string.

Source code in xopt/pydantic.py
def yaml(self, **kwargs):
    """serialize first then dump to yaml string"""
    output = json.loads(
        self.to_json(
            **kwargs,
        )
    )
    return yaml.dump(output)
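
A one-line sketch of serializing the generator configuration:

```python
print(generator.yaml())  # YAML string of the generator's serialized options
```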