Upgrade fbcode/pytorch to Python Scientific Stack 2 #2584
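
The diff below swaps bare `np.ndarray` type annotations for the `numpy.typing.NDArray` alias (imported as `npt`) across BoTorch's optimization and utility modules; `import numpy as np` is kept only where array functions are still called at runtime. A minimal sketch of the annotation pattern being adopted (`scale` is a hypothetical function, not part of this diff):

```python
import numpy as np
import numpy.typing as npt


def scale(x: npt.NDArray, alpha: float) -> npt.NDArray:
    # npt.NDArray is the annotation-friendly alias for np.ndarray; unlike the
    # bare class it is generic over dtype, e.g. npt.NDArray[np.float64].
    return alpha * x


x: npt.NDArray[np.float64] = np.arange(3, dtype=np.float64)
print(scale(x, 2.0))  # [0. 2. 4.]
```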

Closed · wants to merge 1 commit

botorch/exceptions/errors.py (3 additions & 3 deletions)

@@ -10,7 +10,7 @@
 
 from typing import Any
 
-import numpy as np
+import numpy.typing as npt
 
 
 class BotorchError(Exception):
@@ -59,7 +59,7 @@ class OptimizationTimeoutError(BotorchError):
     r"""Exception raised when optimization times out."""
 
     def __init__(
-        self, /, *args: Any, current_x: np.ndarray, runtime: float, **kwargs: Any
+        self, /, *args: Any, current_x: npt.NDArray, runtime: float, **kwargs: Any
     ) -> None:
         r"""
         Args:
@@ -77,7 +77,7 @@ def __init__(
 class OptimizationGradientError(BotorchError, RuntimeError):
     r"""Exception raised when gradient array `gradf` containts NaNs."""
 
-    def __init__(self, /, *args: Any, current_x: np.ndarray, **kwargs: Any) -> None:
+    def __init__(self, /, *args: Any, current_x: npt.NDArray, **kwargs: Any) -> None:
         r"""
         Args:
            *args: Standard args to `BoTorchError`.
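
A hypothetical use of the updated `OptimizationTimeoutError` signature, assuming (as the constructor arguments suggest) that the incumbent iterate and runtime remain accessible on the exception:

```python
import numpy as np
from botorch.exceptions.errors import OptimizationTimeoutError

try:
    raise OptimizationTimeoutError(
        "optimization timed out",
        current_x=np.zeros(2),  # any ndarray satisfies the npt.NDArray annotation
        runtime=10.0,
    )
except OptimizationTimeoutError as err:
    print(err.current_x, err.runtime)  # assumed attributes; see constructor above
```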
botorch/generation/gen.py (3 additions & 2 deletions)

@@ -17,6 +17,7 @@
 from typing import Any, NoReturn
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.acquisition import AcquisitionFunction
 from botorch.exceptions.errors import OptimizationGradientError
@@ -191,7 +192,7 @@ def gen_candidates_scipy(
     with_grad = options.get("with_grad", True)
     if with_grad:
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
             """Given a torch callable, compute value + grad given a numpy array."""
             if np.isnan(x).any():
                 raise RuntimeError(
@@ -223,7 +224,7 @@ def f_np_wrapper(x: np.ndarray, f: Callable):
 
     else:
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
             X = torch.from_numpy(x).to(initial_conditions).view(shapeX).contiguous()
             with torch.no_grad():
                 X_fix = fix_features(X=X, fixed_features=fixed_features)
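
The `f_np_wrapper` closures bridge scipy's ndarray-driven optimizers and torch autograd. A simplified, self-contained version of that pattern (a sketch, not BoTorch's exact implementation):

```python
import numpy as np
import numpy.typing as npt
import torch
from scipy.optimize import minimize


def f_np_wrapper(x: npt.NDArray, f):
    """Given a torch callable, compute value + grad at a numpy iterate."""
    X = torch.from_numpy(x).requires_grad_(True)
    loss = f(X)
    (grad,) = torch.autograd.grad(loss, X)
    return loss.item(), grad.numpy()


res = minimize(
    f_np_wrapper,
    x0=np.array([3.0, -1.0]),
    args=(lambda X: (X - 1.0).pow(2).sum(),),
    jac=True,  # fun returns (value, gradient)
    method="L-BFGS-B",
)
print(res.x)  # ~[1.0, 1.0]
```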
botorch/models/pairwise_gp.py (5 additions & 4 deletions)

@@ -26,6 +26,7 @@
 from typing import Any
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.acquisition.objective import PosteriorTransform
 from botorch.exceptions import UnsupportedError
@@ -397,13 +398,13 @@ def _prior_predict(self, X: Tensor) -> tuple[Tensor, Tensor]:
 
     def _grad_posterior_f(
         self,
-        utility: Tensor | np.ndarray,
+        utility: Tensor | npt.NDArray,
         datapoints: Tensor,
         D: Tensor,
         covar_chol: Tensor,
         covar_inv: Tensor | None = None,
         ret_np: bool = False,
-    ) -> Tensor | np.ndarray:
+    ) -> Tensor | npt.NDArray:
         r"""Compute the gradient of S loss wrt to f/utility in [Chu2005preference]_.
 
         For finding f_map, which is negative of the log posterior, i.e., -log(p(f|D))
@@ -441,13 +442,13 @@ def _grad_posterior_f(
 
     def _hess_posterior_f(
         self,
-        utility: Tensor | np.ndarray,
+        utility: Tensor | npt.NDArray,
         datapoints: Tensor,
         D: Tensor,
         covar_chol: Tensor,
         covar_inv: Tensor,
         ret_np: bool = False,
-    ) -> Tensor | np.ndarray:
+    ) -> Tensor | npt.NDArray:
         r"""Compute the hessian of S loss wrt utility for finding f_map.
 
         which is negative of the log posterior, i.e., -log(p(f|D))
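
`_grad_posterior_f` and `_hess_posterior_f` accept a Tensor or an ndarray because scipy's optimizer drives them with numpy iterates during the MAP solve; the `ret_np` flag mirrors that on the output side. A minimal sketch of the dual-type convention (hypothetical helper, not the model code):

```python
import numpy.typing as npt
import torch
from torch import Tensor


def dual_type_eval(
    utility: Tensor | npt.NDArray, ret_np: bool = False
) -> Tensor | npt.NDArray:
    u = torch.as_tensor(utility, dtype=torch.float64)  # promote ndarray input
    out = u.square().sum()  # stand-in for the actual gradient computation
    return out.numpy() if ret_np else out  # match the caller's array world
```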
botorch/optim/closures/core.py (13 additions & 11 deletions)

@@ -13,6 +13,8 @@
 from functools import partial
 from typing import Any
 
+import numpy.typing as npt
+
 import torch
 from botorch.optim.utils import (
     _handle_numerical_errors,
@@ -21,7 +23,7 @@
 )
 from botorch.optim.utils.numpy_utils import as_ndarray
 from botorch.utils.context_managers import zero_grad_ctx
-from numpy import float64 as np_float64, full as np_full, ndarray, zeros as np_zeros
+from numpy import float64 as np_float64, full as np_full, zeros as np_zeros
 from torch import Tensor
 
 
@@ -82,10 +84,10 @@ def __init__(
         self,
         closure: Callable[[], tuple[Tensor, Sequence[Tensor | None]]],
         parameters: dict[str, Tensor],
-        as_array: Callable[[Tensor], ndarray] = None,  # pyre-ignore [9]
-        as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor,
-        get_state: Callable[[], ndarray] = None,  # pyre-ignore [9]
-        set_state: Callable[[ndarray], None] = None,  # pyre-ignore [9]
+        as_array: Callable[[Tensor], npt.NDArray] = None,  # pyre-ignore [9]
+        as_tensor: Callable[[npt.NDArray], Tensor] = torch.as_tensor,
+        get_state: Callable[[], npt.NDArray] = None,  # pyre-ignore [9]
+        set_state: Callable[[npt.NDArray], None] = None,  # pyre-ignore [9]
         fill_value: float = 0.0,
         persistent: bool = True,
     ) -> None:
@@ -140,11 +142,11 @@ def __init__(
 
         self.fill_value = fill_value
         self.persistent = persistent
-        self._gradient_ndarray: ndarray | None = None
+        self._gradient_ndarray: npt.NDArray | None = None
 
     def __call__(
-        self, state: ndarray | None = None, **kwargs: Any
-    ) -> tuple[ndarray, ndarray]:
+        self, state: npt.NDArray | None = None, **kwargs: Any
+    ) -> tuple[npt.NDArray, npt.NDArray]:
         if state is not None:
             self.state = state
 
@@ -164,14 +166,14 @@ def __call__(
         return value, grads
 
     @property
-    def state(self) -> ndarray:
+    def state(self) -> npt.NDArray:
         return self._get_state()
 
     @state.setter
-    def state(self, state: ndarray) -> None:
+    def state(self, state: npt.NDArray) -> None:
         self._set_state(state)
 
-    def _get_gradient_ndarray(self, fill_value: float | None = None) -> ndarray:
+    def _get_gradient_ndarray(self, fill_value: float | None = None) -> npt.NDArray:
         if self.persistent and self._gradient_ndarray is not None:
             if fill_value is not None:
                 self._gradient_ndarray.fill(fill_value)
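
A toy use of `NdarrayOptimizationClosure`, assuming the contract shown in `__init__` above: the wrapped closure returns a loss tensor plus a sequence of gradient tensors, and `__call__` hands back ndarrays for scipy:

```python
import torch
from botorch.optim.closures import NdarrayOptimizationClosure

x = torch.zeros(3, requires_grad=True)


def closure():
    if x.grad is not None:
        x.grad.zero_()
    loss = (x - 1.0).square().sum()
    loss.backward()
    return loss, [x.grad]  # (Tensor, Sequence[Tensor | None]), per the signature


wrapped = NdarrayOptimizationClosure(closure, parameters={"x": x})
value, grad = wrapped()  # both numpy arrays, per __call__ above
```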
botorch/optim/core.py (5 additions & 3 deletions)

@@ -17,10 +17,12 @@
 from time import monotonic
 from typing import Any
 
+import numpy.typing as npt
+
 from botorch.optim.closures import NdarrayOptimizationClosure
 from botorch.optim.utils.numpy_utils import get_bounds_as_ndarray
 from botorch.optim.utils.timeout import minimize_with_timeout
-from numpy import asarray, float64 as np_float64, ndarray
+from numpy import asarray, float64 as np_float64
 from torch import Tensor
 from torch.optim.adam import Adam
 from torch.optim.optimizer import Optimizer
@@ -60,7 +62,7 @@ def scipy_minimize(
     parameters: dict[str, Tensor],
     bounds: dict[str, tuple[float | None, float | None]] | None = None,
     callback: Callable[[dict[str, Tensor], OptimizationResult], None] | None = None,
-    x0: ndarray | None = None,
+    x0: npt.NDArray | None = None,
     method: str = "L-BFGS-B",
     options: dict[str, Any] | None = None,
     timeout_sec: float | None = None,
@@ -98,7 +100,7 @@ def scipy_minimize(
     else:
         call_counter = count(1)  # callbacks are typically made at the end of each iter
 
-    def wrapped_callback(x: ndarray):
+    def wrapped_callback(x: npt.NDArray):
         result = OptimizationResult(
             step=next(call_counter),
             fval=float(wrapped_closure(x)[0]),
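
A sketch of calling `scipy_minimize` with such a closure (the `closure` parameter itself sits above this hunk; its contract is assumed here to match `NdarrayOptimizationClosure`):

```python
import torch
from botorch.optim.core import scipy_minimize

x = torch.tensor([0.0, 0.0], requires_grad=True)


def closure():
    if x.grad is not None:
        x.grad.zero_()
    loss = (x - torch.tensor([2.0, -3.0])).square().sum()
    loss.backward()
    return loss, [x.grad]


result = scipy_minimize(closure, parameters={"x": x}, timeout_sec=10.0)
print(x)  # parameters are updated in place; x should approach [2, -3]
```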
botorch/optim/parameter_constraints.py (5 additions & 4 deletions)

@@ -16,6 +16,7 @@
 from typing import Union
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
 from scipy.optimize import Bounds
@@ -131,7 +132,7 @@ def make_scipy_linear_constraints(
 
 
 def eval_lin_constraint(
-    x: np.ndarray, flat_idxr: list[int], coeffs: np.ndarray, rhs: float
+    x: npt.NDArray, flat_idxr: list[int], coeffs: npt.NDArray, rhs: float
 ) -> np.float64:
     r"""Evaluate a single linear constraint.
 
@@ -148,8 +149,8 @@
 
 
 def lin_constraint_jac(
-    x: np.ndarray, flat_idxr: list[int], coeffs: np.ndarray, n: int
-) -> np.ndarray:
+    x: npt.NDArray, flat_idxr: list[int], coeffs: npt.NDArray, n: int
+) -> npt.NDArray:
     r"""Return the Jacobian associated with a linear constraint.
 
     Args:
@@ -167,7 +168,7 @@
     return jac
 
 
-def _arrayify(X: Tensor) -> np.ndarray:
+def _arrayify(X: Tensor) -> npt.NDArray:
     r"""Convert a torch.Tensor (any dtype or device) to a numpy (double) array.
 
     Args:
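
A worked call of the two helpers above, under the reading that `eval_lin_constraint` computes `coeffs @ x[flat_idxr] - rhs` and `lin_constraint_jac` scatters `coeffs` into a length-`n` gradient:

```python
import numpy as np

x = np.array([1.0, 2.0, 3.0])
flat_idxr = [0, 2]               # the constraint touches x[0] and x[2]
coeffs = np.array([1.0, -1.0])
rhs = -2.5

value = coeffs @ x[flat_idxr] - rhs  # 1*1 - 1*3 + 2.5 = 0.5
jac = np.zeros(x.size)
jac[flat_idxr] = coeffs              # [1.0, 0.0, -1.0]
```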
botorch/optim/utils/common.py (3 additions & 2 deletions)

@@ -14,12 +14,13 @@
 from warnings import warn_explicit, WarningMessage
 
 import numpy as np
+import numpy.typing as npt
 from linear_operator.utils.errors import NanError, NotPSDError
 
 
 def _handle_numerical_errors(
-    error: RuntimeError, x: np.ndarray, dtype: np.dtype | None = None
-) -> tuple[np.ndarray, np.ndarray]:
+    error: RuntimeError, x: npt.NDArray, dtype: np.dtype | None = None
+) -> tuple[npt.NDArray, npt.NDArray]:
     if isinstance(error, NotPSDError):
         raise error
     error_message = error.args[0] if len(error.args) > 0 else ""
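
`_handle_numerical_errors` is what the `f_np_wrapper` closures in `gen.py` lean on: recoverable numerical failures become NaN-valued `(fval, grad)` pairs that L-BFGS-B can retreat from, while anything else is re-raised. A sketch of that call pattern (assumed behavior, inferred from the signature above):

```python
import numpy.typing as npt
from botorch.optim.utils import _handle_numerical_errors


def safe_eval(x: npt.NDArray, f) -> tuple[npt.NDArray, npt.NDArray]:
    try:
        return f(x)  # f returns (fval, grad), like the wrappers in gen.py
    except RuntimeError as err:
        # NaN-filled (fval, grad) for recognized errors; re-raises otherwise.
        return _handle_numerical_errors(err, x=x)
```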
botorch/optim/utils/numpy_utils.py (8 additions & 8 deletions)

@@ -13,8 +13,8 @@
 from itertools import tee
 
 import numpy as np
+import numpy.typing as npt
 import torch
-from numpy import ndarray
 from torch import Tensor
 
 
@@ -35,7 +35,7 @@
 
 def as_ndarray(
     values: Tensor, dtype: np.dtype | None = None, inplace: bool = True
-) -> ndarray:
+) -> npt.NDArray:
     r"""Helper for going from torch.Tensor to numpy.ndarray.
 
     Args:
@@ -67,10 +67,10 @@ def as_ndarray(
 
 def get_tensors_as_ndarray_1d(
     tensors: Iterator[Tensor] | dict[str, Tensor],
-    out: ndarray | None = None,
+    out: npt.NDArray | None = None,
     dtype: np.dtype | str | None = None,
-    as_array: Callable[[Tensor], ndarray] = as_ndarray,
-) -> ndarray:
+    as_array: Callable[[Tensor], npt.NDArray] = as_ndarray,
+) -> npt.NDArray:
     # Create a pair of iterators, one for setup and one for data transfer
     named_tensors_iter, named_tensors_iter2 = tee(
         iter(tensors.items()) if isinstance(tensors, dict) else enumerate(tensors), 2
@@ -112,8 +112,8 @@ def get_tensors_as_ndarray_1d(
 
 def set_tensors_from_ndarray_1d(
     tensors: Iterator[Tensor] | dict[str, Tensor],
-    array: ndarray,
-    as_tensor: Callable[[ndarray], Tensor] = torch.as_tensor,
+    array: npt.NDArray,
+    as_tensor: Callable[[npt.NDArray], Tensor] = torch.as_tensor,
 ) -> None:
     r"""Sets the values of one more tensors based off of a vector of assignments."""
     named_tensors_iter = (
@@ -137,7 +137,7 @@ def set_tensors_from_ndarray_1d(
 def get_bounds_as_ndarray(
     parameters: dict[str, Tensor],
     bounds: dict[str, tuple[float | Tensor | None, float | Tensor | None]],
-) -> np.ndarray | None:
+) -> npt.NDArray | None:
     r"""Helper method for converting bounds into an ndarray.
 
     Args:
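
The two helpers above flatten a collection of tensors into one 1-d array and scatter values back, which is how torch parameters ride through scipy. A quick round trip:

```python
import torch
from botorch.optim.utils.numpy_utils import (
    get_tensors_as_ndarray_1d,
    set_tensors_from_ndarray_1d,
)

params = {"a": torch.zeros(2), "b": torch.ones(2, 2)}
flat = get_tensors_as_ndarray_1d(params)   # shape (6,) ndarray
flat += 1.0
set_tensors_from_ndarray_1d(params, flat)  # writes back: a == 1, b == 2
```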
botorch/optim/utils/timeout.py (6 additions & 6 deletions)

@@ -11,14 +11,14 @@
 from collections.abc import Callable, Sequence
 from typing import Any
 
-import numpy as np
+import numpy.typing as npt
 from botorch.exceptions.errors import OptimizationTimeoutError
 from scipy import optimize
 
 
 def minimize_with_timeout(
-    fun: Callable[[np.ndarray, ...], float],
-    x0: np.ndarray,
+    fun: Callable[[npt.NDArray, ...], float],
+    x0: npt.NDArray,
     args: tuple[Any, ...] = (),
     method: str | None = None,
     jac: str | Callable | bool | None = None,
@@ -45,7 +45,7 @@ def minimize_with_timeout(
     start_time = time.monotonic()
     callback_data = {"num_iterations": 0}  # update from withing callback below
 
-    def timeout_callback(xk: np.ndarray) -> bool:
+    def timeout_callback(xk: npt.NDArray) -> bool:
         runtime = time.monotonic() - start_time
         callback_data["num_iterations"] += 1
         if runtime > timeout_sec:
@@ -63,14 +63,14 @@ def timeout_callback(xk: np.ndarray) -> bool:
     elif method == "trust-constr":  # special signature
 
         def wrapped_callback(
-            xk: np.ndarray, state: optimize.OptimizeResult
+            xk: npt.NDArray, state: optimize.OptimizeResult
        ) -> bool:
             # order here is important to make sure base callback gets executed
             return callback(xk, state) or timeout_callback(xk=xk)
 
     else:
 
-        def wrapped_callback(xk: np.ndarray) -> None:
+        def wrapped_callback(xk: npt.NDArray) -> None:
             timeout_callback(xk=xk)
             callback(xk)
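
`minimize_with_timeout` mirrors `scipy.optimize.minimize` but threads `timeout_sec` through the callback machinery above, raising `OptimizationTimeoutError` from the callback once the clock runs out. A sketch, assuming the helper converts a timeout into a normal `OptimizeResult` carrying the incumbent iterate:

```python
import numpy as np
from botorch.optim.utils.timeout import minimize_with_timeout


def fun(x):
    return float((x**2).sum()), 2 * x  # (value, gradient), matching jac=True


res = minimize_with_timeout(
    fun=fun,
    x0=np.full(8, 10.0),
    method="L-BFGS-B",
    jac=True,
    timeout_sec=0.5,
)
print(res.x)  # best iterate found within the time budget
```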
botorch/utils/sampling.py (6 additions & 5 deletions)

@@ -24,6 +24,7 @@
 from typing import Any, TYPE_CHECKING
 
 import numpy as np
+import numpy.typing as npt
 import scipy
 import torch
 from botorch.exceptions.errors import BotorchError
@@ -374,11 +375,11 @@ def _convert_bounds_to_inequality_constraints(bounds: Tensor) -> tuple[Tensor, T
 
 
 def find_interior_point(
-    A: np.ndarray,
-    b: np.ndarray,
-    A_eq: np.ndarray | None = None,
-    b_eq: np.ndarray | None = None,
-) -> np.ndarray:
+    A: npt.NDArray,
+    b: npt.NDArray,
+    A_eq: npt.NDArray | None = None,
+    b_eq: npt.NDArray | None = None,
+) -> npt.NDArray:
     r"""Find an interior point of a polytope via linear programming.
 
     Args:
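
A small example of `find_interior_point` on the unit square, written in the `A @ x <= b` form the signature expects:

```python
import numpy as np
from botorch.utils.sampling import find_interior_point

# Unit square {x : 0 <= x <= 1} as A @ x <= b.
A = np.array([[1.0, 0.0], [0.0, 1.0], [-1.0, 0.0], [0.0, -1.0]])
b = np.array([1.0, 1.0, 0.0, 0.0])
x = find_interior_point(A, b)  # a strictly interior point, e.g. [0.5, 0.5]
```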
test/optim/test_parameter_constraints.py (3 additions & 2 deletions)

@@ -8,6 +8,7 @@
 from itertools import product
 
 import numpy as np
+import numpy.typing as npt
 import torch
 from botorch.exceptions.errors import CandidateGenerationError, UnsupportedError
 from botorch.optim.parameter_constraints import (
@@ -55,7 +56,7 @@ def test_make_nonlinear_constraints(self):
         def nlc(x):
             return 4 - x.sum()
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
            """Given a torch callable, compute value + grad given a numpy array."""
            X = (
                torch.from_numpy(x)
@@ -114,7 +115,7 @@ def test_make_scipy_nonlinear_inequality_constraints(self):
         def nlc(x):
             return 4 - x.sum()
 
-        def f_np_wrapper(x: np.ndarray, f: Callable):
+        def f_np_wrapper(x: npt.NDArray, f: Callable):
            """Given a torch callable, compute value + grad given a numpy array."""
            X = (
                torch.from_numpy(x)