Predict.py
#!/usr/bin/env python
# coding: utf-8
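"""Run inference with a trained InteractionGNN checkpoint: build a test
dataloader, score every edge with the model, and collect the sigmoid scores
together with the ground-truth labels (see main() below)."""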
import os
import pprint
import torch
import pytorch_lightning as pl
from torch_geometric.loader import DataLoader
from LightningModules.GNN import InteractionGNN
from LightningModules.GNN.utils.data_utils import split_datasets, load_dataset
device = 'cuda' if torch.cuda.is_available() else 'cpu'
pp = pprint.PrettyPrinter(indent=2)

class SttDataModule(pl.LightningDataModule):
    """DataModules are a way of decoupling data-related hooks from the LightningModule."""

    def __init__(self, hparams):
        super().__init__()

        # Save hyperparameters
        self.save_hyperparameters(hparams)

        # Set workers from hparams
        self.n_workers = (
            self.hparams["n_workers"]
            if "n_workers" in self.hparams
            else len(os.sched_getaffinity(0))
        )

        self.data_split = (
            self.hparams["train_split"]
            if "train_split" in self.hparams
            else [0, 0, 5000]
        )

        self.trainset, self.valset, self.testset = None, None, None
        self.predset = None
    def print_params(self):
        pp.pprint(self.hparams)

    def setup(self, stage=None):
        if stage == "fit" or stage is None:
            self.trainset, self.valset, self.testset = split_datasets(**self.hparams)

        if stage == "test" or stage is None:
            print("Number of Test Events: ", self.hparams['train_split'][2])
            self.testset = load_dataset(self.hparams["input_dir"], self.data_split[2])

        if stage == "pred" or stage is None:
            print("Number of Pred Events: ", self.hparams['train_split'][2])
            self.predset = load_dataset(self.hparams["input_dir"], self.data_split[2])
    def train_dataloader(self):
        if self.trainset is not None:
            return DataLoader(
                self.trainset, batch_size=1, num_workers=self.n_workers
            )  # , pin_memory=True, persistent_workers=True)
        else:
            return None

    def val_dataloader(self):
        if self.valset is not None:
            return DataLoader(
                self.valset, batch_size=1, num_workers=self.n_workers
            )  # , pin_memory=True, persistent_workers=True)
        else:
            return None

    def test_dataloader(self):
        if self.testset is not None:
            return DataLoader(
                self.testset, batch_size=1, num_workers=self.n_workers
            )  # , pin_memory=True, persistent_workers=True)
        else:
            return None

    # def predict_dataloader(self):
    #     if self.predset is not None:
    #         return DataLoader(
    #             self.predset, batch_size=1, num_workers=self.n_workers
    #         )  # , pin_memory=True, persistent_workers=True,)
    #     else:
    #         return None
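
# Example (sketch): SttDataModule can be used in place of the LightningModule's
# own dataloaders, mirroring the commented-out path in main() below:
#   dm = SttDataModule(hparams)
#   dm.setup(stage="test")
#   test_dataloader = dm.test_dataloader()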

# 1 - Helper Function
def get_input_data(batch):
    input_data = batch.x
    # `x != x` is True only for NaN entries, so this replaces NaN node
    # features with zeros before they reach the model.
    input_data[input_data != input_data] = 0
    return input_data

# 2 - Helper Function
def handle_directed(batch, edge_sample, truth_sample, directed=False):
    edge_sample = torch.cat([edge_sample, edge_sample.flip(0)], dim=-1)
    truth_sample = truth_sample.repeat(2)

    if directed:
        direction_mask = batch.x[edge_sample[0], 0] < batch.x[edge_sample[1], 0]
        edge_sample = edge_sample[:, direction_mask]
        truth_sample = truth_sample[direction_mask]

    return edge_sample, truth_sample
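
# Note: handle_directed makes the edge list bidirectional by appending the
# flipped edges and repeating the labels; with directed=True only edges whose
# source node has the smaller value of feature 0 (assumed here to be a
# radial-type coordinate) are kept.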

# 3 - Helper Function
def eval_model(model, test_dataloader):
    """Function to Evaluate a Model"""
    scores, truths = [], []
    model.eval()
    with torch.no_grad():
        for batch_idx, batch in enumerate(test_dataloader):

            # logging
            if batch_idx % 1000 == 0:
                print("Processed Batches: ", batch_idx)

            # predictions
            input_data = get_input_data(batch)
            edge_sample, truth_sample = handle_directed(batch, batch.edge_index, batch.y_pid)
            output = model(input_data, edge_sample).squeeze()
            score = torch.sigmoid(output)

            # append each batch
            scores.append(score)
            truths.append(truth_sample)

    return torch.cat(scores), torch.cat(truths)
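
# Example (sketch): the returned score/truth tensors can be thresholded for a
# quick edge-classification check; 0.5 is an illustrative cut, not a tuned one.
#   scores, truths = eval_model(model, test_dataloader)
#   preds = scores > 0.5
#   accuracy = (preds == truths.bool()).float().mean()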

# 4 - Main Function
def main():

    # Load Model Checkpoint
    ckpnt_path = "run_all/lightning_models/lightning_checkpoints/GNNStudy/version_1/checkpoints/last.ckpt"
    checkpoint = torch.load(ckpnt_path, map_location=device)
    pp.pprint(checkpoint.keys())

    # View Hyperparameters
    hparams = checkpoint["hyper_parameters"]

    # One Can Modify Hyperparameters
    hparams["checkpoint_path"] = ckpnt_path
    hparams["input_dir"] = "run/feature_store"
    hparams["output_dir"] = "run/gnn_processed"
    hparams["artifact_library"] = "lightning_models/lightning_checkpoints"
    hparams["train_split"] = [0, 0, 5000]
    hparams["map_location"] = device

    # Init InteractionGNN
    model = InteractionGNN(hparams)
    model = model.load_from_checkpoint(**hparams)

    # (1) Dataset:: LightningModule
    model.setup(stage="fit")
    # testset = model.testset
    test_dataloader = model.test_dataloader()

    # (2) Dataset:: LightningDataModule
    # dm = SttDataModule(hparams)
    # dm.setup(stage="test")
    # test_dataloader = dm.test_dataloader()

    # evaluate model; returns concatenated score and truth tensors
    scores, truths = eval_model(model, test_dataloader)
    print("Prediction Finished")
    print("\nScores: {}, Truth: {}".format(scores.size(0), truths.size(0)))


if __name__ == "__main__":
    main()
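
# Usage (sketch): run this script directly once the checkpoint and
# feature-store paths in main() point at existing files, e.g.
#   python Predict.py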