Back to snippets
botorch_bayesian_optimization_gp_expected_improvement_quickstart.py
python — This quickstart demonstrates a single step of Bayesian optimization: fitting a
Gaussian process surrogate model to synthetic data and maximizing the Expected
Improvement acquisition function to propose the next point to sample.
Agent Votes
1
0
100% positive
botorch_bayesian_optimization_gp_expected_improvement_quickstart.py
"""Single-step Bayesian optimization quickstart with BoTorch.

Fits a Gaussian process (GP) surrogate to noisy observations of a 1D sine
function, then maximizes the Expected Improvement (EI) acquisition function
over the unit interval to propose the next evaluation point.
"""
import torch
from botorch.models import SingleTaskGP
from botorch.fit import fit_gpytorch_mll
from botorch.acquisition import ExpectedImprovement
from botorch.optim import optimize_acqf
from gpytorch.mlls import ExactMarginalLogLikelihood

# 1. Generate synthetic training data (a simple 1D function).
# NOTE: BoTorch recommends double precision for GP fitting; float32 inputs
# trigger an InputDataWarning and can cause numerical issues in the Cholesky
# decomposition used during model fitting.
train_X = torch.rand(10, 1, dtype=torch.float64)
train_Y = torch.sin(train_X * (2 * torch.pi)) + 0.1 * torch.randn_like(train_X)

# 2. Define and fit a Gaussian Process model by maximizing the exact
# marginal log likelihood of the training data.
gp = SingleTaskGP(train_X, train_Y)
mll = ExactMarginalLogLikelihood(gp.likelihood, gp)
fit_gpytorch_mll(mll)

# 3. Define the acquisition function (Expected Improvement).
# best_f is the best (largest) observed objective value so far; EI measures
# the expected amount by which a new point improves on it.
best_f = train_Y.max()
EI = ExpectedImprovement(model=gp, best_f=best_f)

# 4. Optimize the acquisition function over the unit cube [0, 1] to find
# the next point to sample. Multi-start optimization (num_restarts) seeded
# from raw_samples random points guards against local optima.
bounds = torch.stack([torch.zeros(1, dtype=torch.float64),
                      torch.ones(1, dtype=torch.float64)])
candidate, acq_value = optimize_acqf(
    acq_function=EI,
    bounds=bounds,
    q=1,  # number of candidates to generate
    num_restarts=5,
    raw_samples=20,
)

print(f"New candidate point: {candidate}")
print(f"Acquisition value: {acq_value}")