-
Notifications
You must be signed in to change notification settings - Fork 0
/
validation.py
52 lines (41 loc) · 1.7 KB
/
validation.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import numpy as np
import torch
class ValidationCycle(torch.nn.Module):
    """Kernel ridge regression validation cycle.

    Evaluates the model on the validation set so that derivatives of an
    arbitrary loss with respect to the continuous hyperparameters (here,
    the regularization strength) can be used to minimize the validation
    loss by gradient descent.
    """

    def __init__(self, alpha_exp_initial_guess):
        """Initialize the cycle.

        Parameters
        ----------
        alpha_exp_initial_guess : float
            Initial guess for the base-10 exponent of the kernel
            regularization strength sigma (sigma = 10 ** exponent).
        """
        super().__init__()
        # Regularization is parameterized on a log10 scale so the
        # optimizer can move across orders of magnitude smoothly.
        self.sigma_exponent = torch.nn.Parameter(
            torch.tensor([alpha_exp_initial_guess],
                         dtype=torch.get_default_dtype())
        )

    def forward(self, K_train, y_train, K_val):
        """Fit ridge-regularized KRR weights and predict on validation.

        Parameters
        ----------
        K_train : torch.Tensor
            (n_train, n_train) train-train kernel matrix.
        y_train : torch.Tensor
            Training targets, shape (n_train,) or (n_train, n_targets).
        K_val : torch.Tensor
            (n_val, n_train) validation-train kernel matrix.

        Returns
        -------
        torch.Tensor
            Predictions on the validation set, K_val @ c, where
            c solves (K_train + sigma * I) c = y_train.
        """
        # sigma = 10 ** sigma_exponent, written via exp so that
        # gradients flow through the learnable exponent.
        sigma = torch.exp(self.sigma_exponent * np.log(10.0))
        n_train = K_train.shape[0]
        # Fix: build the identity with the kernel's dtype/device —
        # a bare torch.eye(n) would be CPU/default-dtype and fail for
        # float64 or GPU inputs.
        identity = torch.eye(n_train, dtype=K_train.dtype,
                             device=K_train.device)
        c = torch.linalg.solve(
            K_train + sigma * identity,  # regularized kernel system
            y_train)
        y_val_predictions = K_val @ c
        return y_val_predictions
class ValidationCycleLinear(torch.nn.Module):
    """Linear ridge regression validation cycle.

    Evaluates the model on the validation set so that derivatives of an
    arbitrary loss with respect to the continuous hyperparameters (here,
    the regularization strength) can be used to minimize the validation
    loss by gradient descent.
    """

    def __init__(self, alpha_exp_initial_guess):
        """Initialize the cycle.

        Parameters
        ----------
        alpha_exp_initial_guess : float
            Initial guess for the base-10 exponent of the ridge
            regularization strength sigma (sigma = 10 ** exponent).
        """
        super().__init__()
        # Regularization is parameterized on a log10 scale so the
        # optimizer can move across orders of magnitude smoothly.
        self.sigma_exponent = torch.nn.Parameter(
            torch.tensor([alpha_exp_initial_guess],
                         dtype=torch.get_default_dtype())
        )

    def forward(self, X_train, y_train, X_val):
        """Fit ridge regression weights and predict on validation.

        Parameters
        ----------
        X_train : torch.Tensor
            (n_train, n_feat) training feature matrix.
        y_train : torch.Tensor
            Training targets, shape (n_train,) or (n_train, n_targets).
        X_val : torch.Tensor
            (n_val, n_feat) validation feature matrix.

        Returns
        -------
        torch.Tensor
            Predictions on the validation set, X_val @ c, where c solves
            the normal equations (X^T X + sigma * I) c = X^T y.
        """
        # sigma = 10 ** sigma_exponent, written via exp so that
        # gradients flow through the learnable exponent.
        sigma = torch.exp(self.sigma_exponent * np.log(10.0))
        n_feat = X_train.shape[1]
        # Fix: build the identity with the features' dtype/device —
        # a bare torch.eye(n) would be CPU/default-dtype and fail for
        # float64 or GPU inputs.
        identity = torch.eye(n_feat, dtype=X_train.dtype,
                             device=X_train.device)
        c = torch.linalg.solve(
            X_train.T @ X_train + sigma * identity,  # regularized normal eqs.
            X_train.T @ y_train)
        y_val_predictions = X_val @ c
        return y_val_predictions