-
Notifications
You must be signed in to change notification settings - Fork 0
/
get_method.py
134 lines (111 loc) · 3.98 KB
/
get_method.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
import os
from torchvision import transforms, datasets, models
import torch
from torch import optim, cuda
from torch.utils.data import DataLoader, sampler
import torch.nn as nn
import numpy as np
import pandas as pd
import os
from PIL import Image
from torchsummary import summary
from timeit import default_timer as timer
import matplotlib.pyplot as plt
from datahandler import accuracy
from inceptionv4 import inceptionv4
# Number of output classes for the classification head.
n_classes = 2

# Detect GPU availability once at import time.
train_on_gpu = cuda.is_available()

# Default to single-device mode; only enable DataParallel when more than one
# GPU is present. Defining multi_gpu unconditionally fixes a latent NameError
# on CPU-only machines (the original only assigned it inside the GPU branch).
multi_gpu = False
if train_on_gpu:
    gpu_count = cuda.device_count()
    print(f'{gpu_count} gpus detected.')
    if gpu_count > 1:
        multi_gpu = True
def imshow(image):
    """Display a single image in a 6x6-inch figure with the axes hidden."""
    _, ax = plt.subplots(figsize=(6, 6))
    ax.imshow(image)
    ax.axis('off')
    plt.show()
def get_pretrained_model(model_name):
    """Retrieve a pre-trained torchvision model with a fresh classifier head.

    The backbone weights are frozen; only the newly attached head is
    trainable. The model is moved to GPU (and wrapped in DataParallel when
    multiple GPUs are available).

    Params
    -------
    model_name (str): one of 'vgg16', 'resnet50', 'densenet121'

    Return
    --------
    model (PyTorch model): cnn with a new n_classes-way classifier

    Raises
    --------
    ValueError: if model_name is not one of the supported architectures
    """
    if model_name == 'vgg16':
        model = models.vgg16(pretrained=True)
        # Freeze early layers so only the new head is trained
        for param in model.parameters():
            param.requires_grad = False
        n_inputs = model.classifier[6].in_features
        # Replace the final classifier layer with a small custom head
        model.classifier[6] = nn.Sequential(
            nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))
    elif model_name == 'resnet50':
        model = models.resnet50(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        n_inputs = model.fc.in_features
        model.fc = nn.Sequential(
            nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),
            nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))
    elif model_name == 'densenet121':
        model = models.densenet121(pretrained=True)
        for param in model.parameters():
            param.requires_grad = False
        num_ftrs = model.classifier.in_features
        # Use n_classes instead of the hard-coded 2 for consistency with the
        # other branches (same value today, so behavior is unchanged).
        # NOTE(review): this head has no LogSoftmax, so it emits raw logits
        # while vgg16/resnet50 emit log-probabilities — confirm the loss used
        # with densenet121 is CrossEntropyLoss rather than NLLLoss.
        model.classifier = nn.Linear(num_ftrs, n_classes)
    else:
        # Fail fast with a clear message instead of the UnboundLocalError the
        # original raised when `model` was never assigned.
        raise ValueError(f"Unsupported model_name: {model_name!r}")
    # Move to gpu and parallelize
    if train_on_gpu:
        model = model.to('cuda')
        if multi_gpu:
            model = nn.DataParallel(model)
    return model
def evaluate(model, test_loader, criterion, topk=(1,)):
    """Measure the performance of a trained PyTorch model on a test set.

    Params
    --------
    model (PyTorch model): trained cnn for inference; must expose an
        `idx_to_class` mapping from label index to class name.
        NOTE(review): a model wrapped in nn.DataParallel hides custom
        attributes behind `.module` — confirm callers pass the bare model.
    test_loader (PyTorch DataLoader): test dataloader
    criterion: loss function applied to one example at a time
    topk (sequence of ints): the top-k accuracies to measure
        (default changed from the mutable list [1] to the tuple (1,) —
        mutable default arguments are shared across calls)

    Returns
    --------
    results (DataFrame): mean top-k accuracy and loss per class
    """
    classes = []
    losses = []
    # One row of top-k accuracies per test example
    acc_results = np.zeros((len(test_loader.dataset), len(topk)))
    i = 0
    model.eval()
    with torch.no_grad():
        # Testing loop
        for data, targets in test_loader:
            # Tensors to gpu
            if train_on_gpu:
                data, targets = data.to('cuda'), targets.to('cuda')
            # Raw model output
            out = model(data)
            # Score each example individually so results can be grouped by class
            for pred, true in zip(out, targets):
                # Find topk accuracy
                acc_results[i, :] = accuracy(
                    pred.unsqueeze(0), true.unsqueeze(0), topk)
                classes.append(model.idx_to_class[true.item()])
                # Calculate the per-example loss
                loss = criterion(pred.view(1, n_classes), true.view(1))
                losses.append(loss.item())
                i += 1
    # Renamed the comprehension variable so it no longer shadows the counter i
    results = pd.DataFrame(acc_results, columns=[f'top{k}' for k in topk])
    results['class'] = classes
    results['loss'] = losses
    # Group by the 'class' column: grouping by the raw `classes` list left the
    # index unnamed, and the subsequent reset_index/rename produced a second
    # 'class' column alongside the original one.
    return results.groupby('class').mean().reset_index()