loss.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn


class TaylorSoftmax(nn.Module):
    """Taylor softmax: replaces exp(x) with its order-n Taylor expansion
    1 + x + x^2/2! + ... + x^n/n! before normalizing along `dim`."""

    def __init__(self, dim=1, n=2):
        super(TaylorSoftmax, self).__init__()
        assert n % 2 == 0, "n must be even so the expansion stays positive"
        self.dim = dim
        self.n = n

    def forward(self, x):
        # Accumulate the Taylor expansion of exp(x) term by term.
        fn = torch.ones_like(x)
        denor = 1.
        for i in range(1, self.n + 1):
            denor *= i  # running factorial i!
            fn = fn + x.pow(i) / denor
        # Normalize so the outputs sum to 1 along `dim`.
        out = fn / fn.sum(dim=self.dim, keepdim=True)
        return out


class LabelSmoothingLoss(nn.Module):
    """Negative log-likelihood against a label-smoothed target distribution."""

    def __init__(self, classes, smoothing=0.0, dim=-1):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing
        self.smoothing = smoothing
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        """`pred` is expected to already be log-probabilities
        (Taylor softmax and log are applied on the logits upstream)."""
        # pred = pred.log_softmax(dim=self.dim)
        with torch.no_grad():
            # Smoothed target: `smoothing` mass spread evenly over the wrong
            # classes, `confidence` mass placed on the true class.
            true_dist = torch.zeros_like(pred)
            true_dist.fill_(self.smoothing / (self.cls - 1))
            true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))


class TaylorCrossEntropyLoss(nn.Module):
    """Cross-entropy loss computed with Taylor softmax and label smoothing."""

    def __init__(self, n=2, num_classes=5, ignore_index=-1, reduction='mean', smoothing=0.2):
        super(TaylorCrossEntropyLoss, self).__init__()
        assert n % 2 == 0
        self.taylor_softmax = TaylorSoftmax(dim=1, n=n)
        self.reduction = reduction
        self.ignore_index = ignore_index
        self.lab_smooth = LabelSmoothingLoss(num_classes, smoothing=smoothing)

    def forward(self, logits, labels):
        # Log-probabilities from the Taylor softmax, then smoothed NLL.
        log_probs = self.taylor_softmax(logits).log()
        # loss = F.nll_loss(log_probs, labels, reduction=self.reduction,
        #                   ignore_index=self.ignore_index)
        loss = self.lab_smooth(log_probs, labels)
        return loss
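

# --- Minimal usage sketch (not part of the original module) ---
# A hedged example of how TaylorCrossEntropyLoss might be exercised end to end.
# The batch size, class count, and tensor shapes below are illustrative
# assumptions, not values taken from the original code.
if __name__ == "__main__":
    criterion = TaylorCrossEntropyLoss(n=2, num_classes=5, smoothing=0.2)
    logits = torch.randn(8, 5)          # hypothetical batch of 8 samples, 5 classes
    labels = torch.randint(0, 5, (8,))  # hypothetical integer class labels
    loss = criterion(logits, labels)
    print(loss.item())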