-
Notifications
You must be signed in to change notification settings - Fork 0
/
train.py
52 lines (37 loc) · 1.21 KB
/
train.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
import torch
from model import Model
from dataset import MaskData
from torch.utils.data import DataLoader
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
if __name__ == '__main__':
    # Build the model and switch it to training mode
    # (enables dropout / batch-norm running-stat updates).
    model = Model()
    model.train()

    # Hyperparameters
    epoch = 8
    learning_rate = 0.0001
    batchsize = 8

    # BCELoss expects model outputs already squashed to [0, 1]
    # (i.e. the model's last layer should apply a sigmoid) — TODO confirm in Model.
    loss_func = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Load the dataset and wrap it in a DataLoader.
    # drop_last=True discards a smaller final batch so every batch has batchsize samples.
    traindataset = MaskData(root_dir="./dataset", is_train=True)
    dataloader = DataLoader(traindataset, batch_size=batchsize, shuffle=True, drop_last=True)

    loss_arr = []  # loss samples collected every 50 batches, for plotting
    for epoch_ in range(epoch):
        for i, (data, label) in enumerate(dataloader):
            optimizer.zero_grad()
            # Call the model directly instead of model.forward(...) so that
            # nn.Module.__call__ runs (forward/backward hooks, etc.).
            y = model(data)
            loss = loss_func(y, label)
            loss.backward()
            optimizer.step()

            # Sample the loss every 50 batches.
            if i % 50 == 0:
                # .item() detaches and converts the 0-dim loss tensor to a plain float.
                loss_arr.append(loss.item())
                print(f'{epoch_} : {loss}')

    # Persist the final weights, then show the sampled loss curve.
    torch.save(model.state_dict(), './last.pth')
    plt.plot(loss_arr)
    plt.show()