Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix minimization of improvement-based MC acquisition functions #465

Merged
merged 6 commits into from
Jan 23, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
between constraints and dropped parameters yielding empty parameter sets
- Minimizing a single `NumericalTarget` with specified bounds/transformation via
`SingleTargetObjective` no longer erroneously maximizes it
- Improvement-based Monte Carlo acquisition functions now use the correct
reference value in minimization mode

### Removed
- `botorch_function_wrapper` utility for creating lookup callables
Expand Down
8 changes: 5 additions & 3 deletions baybe/acquisition/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,16 +111,18 @@ def to_botorch(
additional_params["best_f"] = (
bo_surrogate.posterior(train_x).mean.min().item()
)
if issubclass(acqf_cls, bo_acqf.MCAcquisitionFunction):
additional_params["best_f"] *= -1.0

if issubclass(acqf_cls, bo_acqf.AnalyticAcquisitionFunction):
additional_params["maximize"] = False
elif issubclass(acqf_cls, bo_acqf.qNegIntegratedPosteriorVariance):
# qNIPV is valid but does not require any adjusted params
pass
elif issubclass(acqf_cls, bo_acqf.MCAcquisitionFunction):
additional_params["objective"] = LinearMCObjective(
torch.tensor([-1.0])
)
elif issubclass(acqf_cls, bo_acqf.qNegIntegratedPosteriorVariance):
# qNIPV is valid but does not require any adjusted params
pass
else:
raise ValueError(
f"Unsupported acquisition function type: {acqf_cls}."
Expand Down
62 changes: 62 additions & 0 deletions tests/integration/test_minimization.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
"""Tests for target minimization."""

import numpy as np
import pandas as pd
import pytest
import torch
from torch.testing import assert_close

from baybe.acquisition.acqfs import qKnowledgeGradient
from baybe.acquisition.base import AcquisitionFunction
from baybe.parameters.numerical import NumericalDiscreteParameter
from baybe.surrogates.gaussian_process.core import GaussianProcessSurrogate
from baybe.targets.numerical import NumericalTarget
from baybe.utils.basic import get_subclasses
from baybe.utils.random import set_random_seed


def get_acqf_values(acqf_cls, surrogate, searchspace, objective, df):
    """Evaluate an acquisition function on all rows of the given dataframe.

    Args:
        acqf_cls: The BayBE acquisition function class to instantiate.
        surrogate: A fitted surrogate model.
        searchspace: The search space used for transforming the candidates.
        objective: The optimization objective.
        df: Dataframe of candidate points (in experimental representation).

    Returns:
        The acquisition values as a tensor, one value per row of ``df``.
    """
    # TODO: Should be replaced once a proper public interface is available
    acqf = acqf_cls().to_botorch(surrogate, searchspace, objective, df)
    # Unsqueeze to create q-batches of size 1, as expected by BoTorch acqfs
    return acqf(torch.tensor(searchspace.transform(df).values).unsqueeze(-2))


def compute_posterior_and_acqf(acqf_cls, df, searchspace, objective):
    """Fit a GP surrogate on the data and return its posterior and acqf values."""
    model = GaussianProcessSurrogate()
    model.fit(searchspace, objective, df)
    # No gradients needed for evaluating the posterior in the test
    with torch.no_grad():
        post = model.posterior(df)
    acqf_values = get_acqf_values(acqf_cls, model, searchspace, objective, df)
    return post, acqf_values


@pytest.mark.parametrize(
    "acqf_cls",
    [
        a
        for a in get_subclasses(AcquisitionFunction)
        if not issubclass(a, qKnowledgeGradient)  # TODO: not yet clear how to handle
    ],
)
def test_minimization(acqf_cls):
    """Maximizing targets is equivalent to minimizing target with inverted data."""
    values = np.linspace(10, 20)
    searchspace = NumericalDiscreteParameter("p", values).to_searchspace()

    # Maximization of plain targets
    set_random_seed(0)
    df_max = pd.DataFrame({"p": values, "t": values})
    obj_max = NumericalTarget("t", "MAX").to_objective()
    p_min, acqf_max = compute_posterior_and_acqf(acqf_cls, df_max, searchspace, obj_max)

    # Minimization of inverted targets
    set_random_seed(0)
    df_min = pd.DataFrame({"p": values, "t": -values})
    obj_min = NumericalTarget("t", "MIN").to_objective()
    p_max, acqf_min = compute_posterior_and_acqf(acqf_cls, df_min, searchspace, obj_min)

    # Both must yield identical posterior (modulo the sign) and acquisition values
    assert torch.equal(p_min.mean, -p_max.mean)
    assert torch.equal(p_min.mvn.covariance_matrix, p_max.mvn.covariance_matrix)
    # TODO: https://github.com/pytorch/botorch/issues/2681
    assert_close(acqf_max, acqf_min, rtol=0.0001, atol=0.1)
Loading