Commit c64833b

Remove checkpointing from test
gpleiss committed Jun 2, 2023
1 parent 1822b7c commit c64833b
Showing 2 changed files with 3 additions and 29 deletions.
9 changes: 3 additions & 6 deletions test/examples/test_simple_gp_regression.py
@@ -216,7 +216,7 @@ def test_gp_posterior_single_training_point_smoke_test(self):
 
     def test_posterior_latent_gp_and_likelihood_with_optimization(self, cuda=False, checkpoint=0):
         train_x, test_x, train_y, test_y = self._get_data(
-            cuda=cuda, num_data=(1000 if checkpoint else 11), add_noise=bool(checkpoint)
+            cuda=cuda, num_data=(11), add_noise=bool(checkpoint)
         )
         # We're manually going to set the hyperparameters to something they shouldn't be
         likelihood = GaussianLikelihood(noise_prior=SmoothedBoxPrior(exp(-3), exp(3), sigma=0.1))
@@ -234,8 +234,8 @@ def test_posterior_latent_gp_and_likelihood_with_optimization(self, cuda=False,
         gp_model.train()
         likelihood.train()
         optimizer = optim.Adam(gp_model.parameters(), lr=0.15)
-        with gpytorch.beta_features.checkpoint_kernel(checkpoint), gpytorch.settings.fast_pred_var():
-            for _ in range(20 if checkpoint else 50):
+        with gpytorch.settings.fast_pred_var():
+            for _ in range(50):
                optimizer.zero_grad()
                output = gp_model(train_x)
                loss = -mll(output, train_y)
@@ -256,9 +256,6 @@ def test_posterior_latent_gp_and_likelihood_with_optimization(self, cuda=False,
 
         self.assertLess(mean_abs_error.item(), 0.05)
 
-    def test_gp_with_checkpointing(self, cuda=False):
-        return self.test_posterior_latent_gp_and_likelihood_with_optimization(cuda=cuda, checkpoint=250)
-
     def test_fantasy_updates_cuda(self):
         if torch.cuda.is_available():
             with least_used_cuda_device():
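For context, the surviving test path reduces to a standard exact-GP training loop under gpytorch.settings.fast_pred_var(), with the checkpoint_kernel wrapper gone. Below is a minimal, self-contained sketch of that pattern; the ExactGPModel class and the synthetic 11-point dataset are illustrative stand-ins, not the test's actual _get_data() or model.

import math

import torch
import gpytorch
from torch import optim


class ExactGPModel(gpytorch.models.ExactGP):
    # Illustrative stand-in for the model built in the test
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())

    def forward(self, x):
        return gpytorch.distributions.MultivariateNormal(self.mean_module(x), self.covar_module(x))


# Synthetic stand-in for _get_data(): 11 training points, as in the updated test
train_x = torch.linspace(0, 1, 11)
train_y = torch.sin(train_x * (2 * math.pi))

likelihood = gpytorch.likelihoods.GaussianLikelihood()
gp_model = ExactGPModel(train_x, train_y, likelihood)
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, gp_model)

gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.15)

# The loop the test now runs unconditionally: no checkpoint_kernel wrapper
with gpytorch.settings.fast_pred_var():
    for _ in range(50):
        optimizer.zero_grad()
        output = gp_model(train_x)
        loss = -mll(output, train_y)
        loss.backward()
        optimizer.step()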
23 changes: 0 additions & 23 deletions test/lazy/test_lazy_evaluated_kernel_tensor.py
@@ -112,29 +112,6 @@ def _test_inv_matmul(self, rhs, lhs=None, cholesky=False):
         else:
             self.assertFalse(linear_cg_mock.called)
 
-    def test_inv_matmul_matrix_with_checkpointing(self):
-        # Add one checkpointing test
-        lazy_tensor = self.create_linear_op().requires_grad_(True)
-        lazy_tensor_copy = lazy_tensor.clone().detach_().requires_grad_(True)
-        evaluated = self.evaluate_linear_op(lazy_tensor_copy)
-
-        test_vector = torch.randn(2, 5, 6)
-        test_vector_copy = test_vector.clone()
-        with gpytorch.beta_features.checkpoint_kernel(2):
-            res = lazy_tensor.solve(test_vector)
-        actual = evaluated.inverse().matmul(test_vector_copy)
-        self.assertLess(((res - actual).abs() / actual.abs().clamp(1, 1e5)).max().item(), 3e-1)
-
-        grad = torch.randn_like(res)
-        res.backward(gradient=grad)
-        actual.backward(gradient=grad)
-
-        for param, param_copy in zip(lazy_tensor.kernel.parameters(), lazy_tensor_copy.kernel.parameters()):
-            self.assertAllClose(param.grad, param_copy.grad, rtol=1e-3)
-        self.assertAllClose(
-            lazy_tensor.x1.grad + lazy_tensor.x2.grad, lazy_tensor_copy.x1.grad + lazy_tensor_copy.x2.grad, rtol=1e-3
-        )
-
     def test_batch_getitem(self):
         """Indexing was wrong when the kernel had more batch dimensions than the
         data"""
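The deleted test compared solve() on the linear operator, run under gpytorch.beta_features.checkpoint_kernel(2), against a dense inverse, with a relative tolerance of 3e-1. A minimal sketch of that solve-versus-inverse check on a plain batch of symmetric positive-definite tensors (a hypothetical stand-in for the LazyEvaluatedKernelTensor, with the checkpointing context removed):

import torch

torch.manual_seed(0)

# Batch of symmetric positive-definite matrices standing in for the
# evaluated kernel tensor; the (2, 5, 6, 6) shape is illustrative
mat = torch.randn(2, 5, 6, 6)
mat = mat @ mat.transpose(-1, -2) + 6 * torch.eye(6)

test_vector = torch.randn(2, 5, 6, 1)

# Solve-versus-dense-inverse comparison, minus checkpoint_kernel
res = torch.linalg.solve(mat, test_vector)
actual = torch.inverse(mat) @ test_vector

# Same relative-error criterion as the deleted assertion
rel_err = ((res - actual).abs() / actual.abs().clamp(1, 1e5)).max().item()
assert rel_err < 3e-1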
