loss.py
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
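
# Loss functions for a two-branch dense prediction head: a focal-style binary
# classification loss on output_b plus a wrap-aware smooth L1 regression loss
# on output_o; the +/-pi handling below suggests orientation-angle targets.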

def ori_smooth_l1_loss(output, target, bs, weight):
    """Smooth L1 loss on angle residuals with wrap-around handling: pairs that
    sit on opposite sides of the +/-pi discontinuity are compared by adding
    instead of subtracting."""
    sigma = 3.0
    output = output * weight
    target = target * weight
    # Boolean masks built with torch ops, so this also works on GPU tensors.
    wrap = ((output > np.pi) & (target > 0)) | ((output < -np.pi) & (target < 0))
    no_wrap = ~wrap
    # Inflection at 1/sigma**2 so the quadratic and linear branches join continuously.
    loss1 = torch.abs(target[wrap] + output[wrap])        # add across the wrap
    loss1 = torch.sum(torch.where(loss1 < 1.0 / sigma ** 2,
                                  0.5 * (loss1 * sigma) ** 2,
                                  loss1 - 0.5 / sigma ** 2))
    loss2 = torch.abs(target[no_wrap] - output[no_wrap])  # ordinary residual
    loss2 = torch.sum(torch.where(loss2 < 1.0 / sigma ** 2,
                                  0.5 * (loss2 * sigma) ** 2,
                                  loss2 - 0.5 / sigma ** 2))
    return (loss1 + loss2) / bs
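
# A quick numerical check of the branch join used above (a minimal sketch, not
# part of training): at diff = 1/sigma**2 the quadratic branch 0.5*(sigma*diff)**2
# and the linear branch diff - 0.5/sigma**2 both equal 0.5/sigma**2.
def _check_smooth_l1_continuity(sigma=3.0):
    d = 1.0 / sigma ** 2
    assert abs(0.5 * (d * sigma) ** 2 - (d - 0.5 / sigma ** 2)) < 1e-12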

def smooth_l1_loss(output, target, bs, weight):
    """Mask-based variant of the wrap-aware smooth L1 above (inflection at
    1/sigma rather than 1/sigma**2)."""
    sigma = 9.0
    wrap = ((output > np.pi) & (target > 0)) | ((output < -np.pi) & (target < 0))
    no_wrap = ~wrap
    loss1 = torch.abs(wrap.float() * (target + output))     # add across the wrap
    loss2 = torch.abs(no_wrap.float() * (target - output))  # ordinary residual
    diff = loss1 + loss2
    loss = torch.where(diff < 1.0 / sigma, 0.5 * diff * diff * sigma, diff - 0.5 / sigma)
    loss = loss * weight
    return torch.sum(loss) / bs

def focal_loss(output, target, bs, alpha, gamma):
    """Alpha-balanced focal loss on sigmoid probabilities."""
    eps = 1e-8
    loss = -alpha * target * torch.pow(1.0 - output, gamma) * torch.log(output + eps) \
           - (1.0 - alpha) * (1.0 - target) * torch.pow(output, gamma) * torch.log(1.0 - output + eps)
    return torch.mean(loss)
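
# Sanity sketch (not part of training): with gamma = 0 the modulating factor is
# 1, so focal_loss should reduce to 0.5 * mean binary cross-entropy when
# alpha = 0.5, up to the eps inside the logs.
def _check_focal_reduces_to_bce():
    torch.manual_seed(0)
    p = torch.rand(16).clamp(0.01, 0.99)
    t = torch.randint(0, 2, (16,)).float()
    fl = focal_loss(p, t, bs=16, alpha=0.5, gamma=0)
    ref = 0.5 * F.binary_cross_entropy(p, t, reduction='mean')
    assert torch.allclose(fl, ref, atol=1e-5)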

def bce_loss(output, target, bs, alpha, gamma):
    """Alpha-balanced binary cross-entropy on sigmoid probabilities."""
    eps = 1e-8
    loss = -alpha * target * torch.log(output + eps) \
           - (1.0 - alpha) * (1.0 - target) * torch.log(1.0 - output + eps)
    return torch.sum(loss) / 15.0  # hard-coded normalizer; bs is unused

def attentional_focal_loss(output, target, bs, alpha, gamma):
    """Focal-style loss with an exponential attention term 4**(error**0.5) in
    place of the usual (1 - p)**gamma modulating factor."""
    eps = 1e-8
    loss = -alpha * target * (4 ** ((1.0 - output) ** 0.5)) * torch.log(output + eps) \
           - (1.0 - alpha) * (1.0 - target) * (4 ** (output ** 0.5)) * torch.log(1.0 - output + eps)
    return torch.sum(loss) / 15.0  # same hard-coded normalizer as bce_loss

def l1_loss(output, target, bs):
    # Thin wrapper; F.smooth_l1_loss already averages, so bs is unused.
    return F.smooth_l1_loss(output, target)

def bce_loss2(output, target, bs, alpha, gamma):
    """Alpha-balanced BCE computed from raw logits (no sigmoid on the input),
    using the numerically stable form max(x,0) - x*t + log(1 + exp(-|x|))."""
    weight = target * alpha + (1.0 - target) * (1.0 - alpha)
    pos = (output >= 0).float()
    loss = -(output * (target - pos) -
             torch.log(1.0 + torch.exp(output - 2.0 * output * pos)))
    loss = loss * weight
    return torch.sum(loss) / bs
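
# Sanity sketch (not part of training): with alpha = 0.5 the class weight is a
# constant 0.5, so bce_loss2 should match 0.5 * the summed
# F.binary_cross_entropy_with_logits divided by bs.
def _check_bce_loss2():
    torch.manual_seed(0)
    x = torch.randn(8)
    t = torch.randint(0, 2, (8,)).float()
    ours = bce_loss2(x, t, bs=8, alpha=0.5, gamma=None)
    ref = 0.5 * F.binary_cross_entropy_with_logits(x, t, reduction='sum') / 8
    assert torch.allclose(ours, ref, atol=1e-6)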

def attentional_focal_loss2(output, target, bs, alpha, gamma):
    """Stable-logits version of attentional_focal_loss: the attention weight is
    computed from sigmoid(output), the BCE term from the raw logits."""
    sigmoid_output = torch.sigmoid(output)
    weight = target * alpha * (4 ** ((1.0 - sigmoid_output) ** 0.5)) + \
             (1.0 - target) * (1.0 - alpha) * (4 ** (sigmoid_output ** 0.5))
    pos = (output >= 0).float()
    loss = -(output * (target - pos) -
             torch.log(1.0 + torch.exp(output - 2.0 * output * pos)))
    loss = loss * weight
    return torch.sum(loss) / bs

def attentional_focal_loss3(output, target, bs, alpha, gamma):
    """Same loss as attentional_focal_loss2, delegating the stable BCE to
    F.binary_cross_entropy_with_logits."""
    sigmoid_output = torch.sigmoid(output)
    weight = target * alpha * (4.0 ** ((1.0 - sigmoid_output) ** 0.5)) + \
             (1.0 - target) * (1.0 - alpha) * (4.0 ** (sigmoid_output ** 0.5))
    loss = F.binary_cross_entropy_with_logits(output, target, weight, reduction='none')
    return torch.sum(loss) / bs
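
# Sanity sketch (not part of training): attentional_focal_loss2 and
# attentional_focal_loss3 compute the same quantity, one with a hand-written
# stable BCE and one via F.binary_cross_entropy_with_logits.
def _check_attentional_variants_agree():
    torch.manual_seed(0)
    x = torch.randn(32)
    t = torch.randint(0, 2, (32,)).float()
    l2 = attentional_focal_loss2(x, t, bs=32, alpha=0.25, gamma=2)
    l3 = attentional_focal_loss3(x, t, bs=32, alpha=0.25, gamma=2)
    assert torch.allclose(l2, l3, atol=1e-5)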

class Focal_L1_Loss(nn.Module):
    def __init__(self, alpha=0.1, gamma=2, lamda=0.5):
        super(Focal_L1_Loss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma
        self.lamda = lamda

    def forward(self, output_b, output_o, label):
        """
        output_b: [N,1,H,W] classification logits
        output_o: [N,1,H,W] regression predictions
        label:    [N,2,H,W] (channel 0: binary labels, channel 1: regression targets)
        """
        batch_size, _, height, width = label.size()
        label_b, label_o = label[:, 0], label[:, 1]
        # Flatten everything to [N*H*W]
        output_b = output_b.contiguous().view(-1)
        output_o = output_o.contiguous().view(-1)
        label_b = label_b.contiguous().view(-1)
        label_o = label_o.contiguous().view(-1)
        # Balance alpha by the observed negative/positive ratio
        num_pos = torch.sum(label_b == 1).float()
        num_neg = torch.sum(label_b == 0).float()
        alpha = num_neg / (num_pos + num_neg)
        loss_focal = attentional_focal_loss3(output_b, label_b, batch_size, alpha, self.gamma)
        # The regression loss is masked by the binary labels (weight = label_b)
        loss_l1 = ori_smooth_l1_loss(output_o, label_o.float(), batch_size, label_b.float())
        print(loss_focal.item(), loss_l1.item())
        return loss_focal, self.lamda * loss_l1

if __name__ == '__main__':
    N = 2
    H, W = 200, 400
    label_o = torch.rand(N, 1, H, W).float()
    label_b = torch.randint(0, 2, size=(N, 1, H, W)).float()
    # cat (not stack) along dim 1 so label has shape [N,2,H,W]
    label = torch.cat((label_b, label_o), 1)
    o_b, o_o = torch.rand(N, 1, H, W), torch.rand(N, 1, H, W)
    orientation = Focal_L1_Loss()
    orientation(o_b, o_o, label)