# Loss functions

import torch
import torch.nn as nn
import torch.nn.functional as F

from utils.general import bbox_iou, bbox_alpha_iou, box_iou, box_giou, box_diou, box_ciou, xywh2xyxy
from utils.torch_utils import is_parallel


def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    # return positive, negative label smoothing BCE targets
    return 1.0 - 0.5 * eps, 0.5 * eps
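

# Illustrative usage (added example, not part of the original file): label
# smoothing turns the hard 1/0 BCE targets into soft ones.
def _demo_smooth_bce():
    cp, cn = smooth_BCE(eps=0.1)  # positive target 0.95, negative target 0.05
    assert abs(cp - 0.95) < 1e-9 and abs(cn - 0.05) < 1e-9
    return cp, cn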

class BCEBlurWithLogitsLoss(nn.Module):
    # BCEWithLogitsLoss() with reduced missing-label effects.
    def __init__(self, alpha=0.05):
        super(BCEBlurWithLogitsLoss, self).__init__()
        self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none')  # must be nn.BCEWithLogitsLoss()
        self.alpha = alpha

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        pred = torch.sigmoid(pred)  # prob from logits
        dx = pred - true  # reduce only missing label effects
        # dx = (pred - true).abs()  # reduce missing label and false label effects
        alpha_factor = 1 - torch.exp((dx - 1) / (self.alpha + 1e-4))
        loss *= alpha_factor
        return loss.mean()
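

# Illustrative usage (added example): a confident positive prediction against a
# zero label (a possibly missing annotation) is down-weighted relative to plain BCE.
def _demo_bce_blur():
    crit = BCEBlurWithLogitsLoss(alpha=0.05)
    pred = torch.tensor([4.0, -4.0])  # logits; the first looks like a missing label
    true = torch.tensor([0.0, 0.0])
    return crit(pred, true)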

class SigmoidBin(nn.Module):
    stride = None  # strides computed during build
    export = False  # onnx export

    def __init__(self, bin_count=10, min=0.0, max=1.0, reg_scale=2.0, use_loss_regression=True,
                 use_fw_regression=True, BCE_weight=1.0, smooth_eps=0.0):
        super(SigmoidBin, self).__init__()
        self.bin_count = bin_count
        self.length = bin_count + 1
        self.min = min
        self.max = max
        self.scale = float(max - min)
        self.shift = self.scale / 2.0
        self.use_loss_regression = use_loss_regression
        self.use_fw_regression = use_fw_regression
        self.reg_scale = reg_scale
        self.BCE_weight = BCE_weight

        start = min + (self.scale / 2.0) / self.bin_count
        end = max - (self.scale / 2.0) / self.bin_count
        step = self.scale / self.bin_count
        self.step = step
        # print(f" start = {start}, end = {end}, step = {step} ")

        # torch.range is deprecated; torch.arange with the +0.0001 slack yields the same bin centers
        bins = torch.arange(start, end + 0.0001, step).float()
        self.register_buffer('bins', bins)

        self.cp = 1.0 - 0.5 * smooth_eps
        self.cn = 0.5 * smooth_eps

        self.BCEbins = nn.BCEWithLogitsLoss(pos_weight=torch.Tensor([BCE_weight]))
        self.MSELoss = nn.MSELoss()

    def get_length(self):
        return self.length

    def forward(self, pred):
        assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length)
        pred_reg = (pred[..., 0] * self.reg_scale - self.reg_scale / 2.0) * self.step
        pred_bin = pred[..., 1:(1 + self.bin_count)]

        _, bin_idx = torch.max(pred_bin, dim=-1)
        bin_bias = self.bins[bin_idx]

        if self.use_fw_regression:
            result = pred_reg + bin_bias
        else:
            result = bin_bias
        result = result.clamp(min=self.min, max=self.max)
        return result

    def training_loss(self, pred, target):
        assert pred.shape[-1] == self.length, 'pred.shape[-1]=%d is not equal to self.length=%d' % (pred.shape[-1], self.length)
        assert pred.shape[0] == target.shape[0], 'pred.shape=%d is not equal to the target.shape=%d' % (pred.shape[0], target.shape[0])
        device = pred.device

        pred_reg = (pred[..., 0].sigmoid() * self.reg_scale - self.reg_scale / 2.0) * self.step
        pred_bin = pred[..., 1:(1 + self.bin_count)]

        diff_bin_target = torch.abs(target[..., None] - self.bins)
        _, bin_idx = torch.min(diff_bin_target, dim=-1)

        bin_bias = self.bins[bin_idx]
        bin_bias.requires_grad = False
        result = pred_reg + bin_bias

        target_bins = torch.full_like(pred_bin, self.cn, device=device)  # targets
        n = pred.shape[0]
        target_bins[range(n), bin_idx] = self.cp

        loss_bin = self.BCEbins(pred_bin, target_bins)  # BCE

        if self.use_loss_regression:
            loss_regression = self.MSELoss(result, target)  # MSE
            loss = loss_bin + loss_regression
        else:
            loss = loss_bin

        out_result = result.clamp(min=self.min, max=self.max)
        return loss, out_result
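

# Illustrative usage (added example): SigmoidBin predicts a scalar in [min, max]
# from one regression logit plus bin_count bin-classification logits.
def _demo_sigmoid_bin():
    sb = SigmoidBin(bin_count=10, min=0.0, max=4.0, use_loss_regression=False)
    pred = torch.randn(8, sb.get_length())  # (batch, bin_count + 1) raw logits
    target = torch.rand(8) * 4.0
    loss, decoded = sb.training_loss(pred, target)  # decoded is clamped to [0, 4]
    return loss, decoded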

class FocalLoss(nn.Module):
    # Wraps focal loss around existing loss_fcn(), e.g. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)
        # p_t = torch.exp(-loss)
        # loss *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        pred_prob = torch.sigmoid(pred)  # prob from logits
        p_t = true * pred_prob + (1 - true) * (1 - pred_prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
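

# Illustrative usage (added example): easy examples (p_t near 1) are down-weighted
# by (1 - p_t) ** gamma relative to plain BCE, focusing training on hard examples.
def _demo_focal_loss():
    crit = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
    pred = torch.tensor([3.0, -3.0, 0.1])  # logits: easy pos, easy neg, hard pos
    true = torch.tensor([1.0, 0.0, 1.0])
    return crit(pred, true)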

class QFocalLoss(nn.Module):
    # Wraps quality focal loss around existing loss_fcn(), e.g. criteria = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5)
    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(QFocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma
        self.alpha = alpha
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'  # required to apply FL to each element

    def forward(self, pred, true):
        loss = self.loss_fcn(pred, true)

        pred_prob = torch.sigmoid(pred)  # prob from logits
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = torch.abs(true - pred_prob) ** self.gamma
        loss *= alpha_factor * modulating_factor

        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        else:  # 'none'
            return loss
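

# Illustrative usage (added example): quality focal loss accepts soft
# (IoU-valued) targets and modulates by |true - sigmoid(pred)| ** gamma.
def _demo_qfocal_loss():
    crit = QFocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5, alpha=0.25)
    pred = torch.tensor([2.0, -1.0])  # logits
    true = torch.tensor([0.8, 0.0])   # soft quality targets
    return crit(pred, true)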

class RankSort(torch.autograd.Function):
    @staticmethod
    def forward(ctx, logits, targets, delta_RS=0.50, eps=1e-10):

        classification_grads = torch.zeros_like(logits)  # was .cuda(); use the logits' own device so this also runs on CPU

        # Filter fg logits
        fg_labels = (targets > 0.)
        fg_logits = logits[fg_labels]
        fg_targets = targets[fg_labels]
        fg_num = len(fg_logits)

        # Do not use bg with scores less than minimum fg logit
        # since changing its score does not have an effect on precision
        threshold_logit = torch.min(fg_logits) - delta_RS
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))

        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros(len(relevant_bg_logits), device=logits.device)
        sorting_error = torch.zeros(fg_num, device=logits.device)
        ranking_error = torch.zeros(fg_num, device=logits.device)
        fg_grad = torch.zeros(fg_num, device=logits.device)

        # Sort the fg logits
        order = torch.argsort(fg_logits)
        # Loop over each positive following the order
        for ii in order:
            # Difference transforms (x_ij)
            fg_relations = fg_logits - fg_logits[ii]
            bg_relations = relevant_bg_logits - fg_logits[ii]

            if delta_RS > 0:
                fg_relations = torch.clamp(fg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
                bg_relations = torch.clamp(bg_relations / (2 * delta_RS) + 0.5, min=0, max=1)
            else:
                fg_relations = (fg_relations >= 0).float()
                bg_relations = (bg_relations >= 0).float()

            # Rank of ii among pos and false positive number (bg with larger scores)
            rank_pos = torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)

            # Rank of ii among all examples
            rank = rank_pos + FP_num

            # Ranking error of example ii. target_ranking_error is always 0. (Eq. 7)
            ranking_error[ii] = FP_num / rank

            # Current sorting error of example ii. (Eq. 7)
            current_sorting_error = torch.sum(fg_relations * (1 - fg_targets)) / rank_pos

            # Find examples in the target sorted order for example ii
            iou_relations = (fg_targets >= fg_targets[ii])
            target_sorted_order = iou_relations * fg_relations

            # The rank of ii among positives in sorted order
            rank_pos_target = torch.sum(target_sorted_order)

            # Compute target sorting error. (Eq. 8)
            # Since target ranking error is 0, this is also the total target error
            target_sorting_error = torch.sum(target_sorted_order * (1 - fg_targets)) / rank_pos_target

            # Compute sorting error on example ii
            sorting_error[ii] = current_sorting_error - target_sorting_error

            # Identity update for ranking error
            if FP_num > eps:
                # For ii the update is the ranking error
                fg_grad[ii] -= ranking_error[ii]
                # For negatives, distribute error via ranking pmf (i.e. bg_relations / FP_num)
                relevant_bg_grad += (bg_relations * (ranking_error[ii] / FP_num))

            # Find the positives that are misranked (the cause of the error)
            # These are the ones with smaller IoU but larger logits
            missorted_examples = (~iou_relations) * fg_relations

            # Denominator of sorting pmf
            sorting_pmf_denom = torch.sum(missorted_examples)

            # Identity update for sorting error
            if sorting_pmf_denom > eps:
                # For ii the update is the sorting error
                fg_grad[ii] -= sorting_error[ii]
                # For positives, distribute error via sorting pmf (i.e. missorted_examples / sorting_pmf_denom)
                fg_grad += (missorted_examples * (sorting_error[ii] / sorting_pmf_denom))

        # Normalize gradients by number of positives
        classification_grads[fg_labels] = (fg_grad / fg_num)
        classification_grads[relevant_bg_labels] = (relevant_bg_grad / fg_num)

        ctx.save_for_backward(classification_grads)

        return ranking_error.mean(), sorting_error.mean()

    @staticmethod
    def backward(ctx, out_grad1, out_grad2):
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None, None
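

# Illustrative usage (added example): targets are 0 for background and an
# IoU-like quality in (0, 1] for foreground; the function returns the mean
# ranking and sorting errors, with hand-crafted gradients applied in backward.
def _demo_rank_sort():
    logits = torch.randn(16, requires_grad=True)
    targets = torch.zeros(16)
    targets[:4] = torch.tensor([0.9, 0.7, 0.5, 0.3])  # foreground qualities
    ranking_loss, sorting_loss = RankSort.apply(logits, targets)
    (ranking_loss + sorting_loss).backward()  # populates logits.grad
    return ranking_loss, sorting_loss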

class aLRPLoss(torch.autograd.Function):
    @staticmethod
    def forward(ctx, logits, targets, regression_losses, delta=1., eps=1e-5):
        classification_grads = torch.zeros_like(logits)  # was .cuda(); use the logits' own device

        # Filter fg logits
        fg_labels = (targets == 1)
        fg_logits = logits[fg_labels]
        fg_num = len(fg_logits)

        # Do not use bg with scores less than minimum fg logit
        # since changing its score does not have an effect on precision
        threshold_logit = torch.min(fg_logits) - delta

        # Get valid bg logits
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros(len(relevant_bg_logits), device=logits.device)
        rank = torch.zeros(fg_num, device=logits.device)
        prec = torch.zeros(fg_num, device=logits.device)
        fg_grad = torch.zeros(fg_num, device=logits.device)

        max_prec = 0
        # Sort the fg logits
        order = torch.argsort(fg_logits)
        # Loop over each positive following the order
        for ii in order:
            # x_ij s as score differences with fgs
            fg_relations = fg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with fgs
            fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
            # Discard i=j in the summation in rank_pos
            fg_relations[ii] = 0

            # x_ij s as score differences with bgs
            bg_relations = relevant_bg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with bgs
            bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)

            # Compute the rank of the example within fgs and number of bgs with larger scores
            rank_pos = 1 + torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)
            # Store the total since it is the normalizer also for the aLRP regression error
            rank[ii] = rank_pos + FP_num

            # Compute precision for this example to compute classification loss
            prec[ii] = rank_pos / rank[ii]
            # For stability, set eps to an infinitesimally small value (e.g. 1e-6), then compute grads
            if FP_num > eps:
                fg_grad[ii] = -(torch.sum(fg_relations * regression_losses) + FP_num) / rank[ii]
                relevant_bg_grad += (bg_relations * (-fg_grad[ii] / FP_num))

        # aLRP with grad formulation fg gradient
        classification_grads[fg_labels] = fg_grad
        # aLRP with grad formulation bg gradient
        classification_grads[relevant_bg_labels] = relevant_bg_grad
        classification_grads /= fg_num

        cls_loss = 1 - prec.mean()
        ctx.save_for_backward(classification_grads)

        return cls_loss, rank, order

    @staticmethod
    def backward(ctx, out_grad1, out_grad2, out_grad3):
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None, None, None

class APLoss(torch.autograd.Function):
    @staticmethod
    def forward(ctx, logits, targets, delta=1.):
        classification_grads = torch.zeros_like(logits)  # was .cuda(); use the logits' own device

        # Filter fg logits
        fg_labels = (targets == 1)
        fg_logits = logits[fg_labels]
        fg_num = len(fg_logits)

        # Do not use bg with scores less than minimum fg logit
        # since changing its score does not have an effect on precision
        threshold_logit = torch.min(fg_logits) - delta

        # Get valid bg logits
        relevant_bg_labels = ((targets == 0) & (logits >= threshold_logit))
        relevant_bg_logits = logits[relevant_bg_labels]
        relevant_bg_grad = torch.zeros(len(relevant_bg_logits), device=logits.device)
        rank = torch.zeros(fg_num, device=logits.device)
        prec = torch.zeros(fg_num, device=logits.device)
        fg_grad = torch.zeros(fg_num, device=logits.device)

        max_prec = 0
        # Sort the fg logits
        order = torch.argsort(fg_logits)
        # Loop over each positive following the order
        for ii in order:
            # x_ij s as score differences with fgs
            fg_relations = fg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with fgs
            fg_relations = torch.clamp(fg_relations / (2 * delta) + 0.5, min=0, max=1)
            # Discard i=j in the summation in rank_pos
            fg_relations[ii] = 0

            # x_ij s as score differences with bgs
            bg_relations = relevant_bg_logits - fg_logits[ii]
            # Apply piecewise linear function and determine relations with bgs
            bg_relations = torch.clamp(bg_relations / (2 * delta) + 0.5, min=0, max=1)

            # Compute the rank of the example within fgs and number of bgs with larger scores
            rank_pos = 1 + torch.sum(fg_relations)
            FP_num = torch.sum(bg_relations)
            # Store the total since it is the normalizer also for the aLRP regression error
            rank[ii] = rank_pos + FP_num

            # Compute precision for this example
            current_prec = rank_pos / rank[ii]

            # Compute interpolated AP and store gradients for relevant bg examples
            if (max_prec <= current_prec):
                max_prec = current_prec
                relevant_bg_grad += (bg_relations / rank[ii])
            else:
                relevant_bg_grad += (bg_relations / rank[ii]) * (((1 - max_prec) / (1 - current_prec)))

            # Store fg gradients
            fg_grad[ii] = -(1 - max_prec)
            prec[ii] = max_prec

        # aLRP with grad formulation fg gradient
        classification_grads[fg_labels] = fg_grad
        # aLRP with grad formulation bg gradient
        classification_grads[relevant_bg_labels] = relevant_bg_grad
        classification_grads /= fg_num

        cls_loss = 1 - prec.mean()
        ctx.save_for_backward(classification_grads)

        return cls_loss

    @staticmethod
    def backward(ctx, out_grad1):
        g1, = ctx.saved_tensors
        return g1 * out_grad1, None, None
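

# Illustrative usage (added example): binary targets (1 = fg, 0 = bg); the
# returned loss is 1 minus the interpolated average precision.
def _demo_ap_loss():
    logits = torch.randn(32, requires_grad=True)
    targets = torch.zeros(32)
    targets[:5] = 1.0  # five foreground examples
    loss = APLoss.apply(logits, targets)
    loss.backward()  # uses the hand-crafted gradients saved in forward
    return loss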

class ComputeLoss:
    # Compute losses
    def __init__(self, model, autobalance=False):
        super(ComputeLoss, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05])  # P3-P7
        #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets):  # predictions, targets
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        tcls, tbox, indices, anchors = self.build_targets(p, targets)  # targets

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), tcls[i]] = self.cp
                    #t[t==self.cp] = iou.detach().clamp(0).type(t.dtype)
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        tcls, tbox, indices, anch = [], [], [], []
        gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            tbox.append(torch.cat((gxy - gij, gwh), 1))  # box
            anch.append(anchors[a])  # anchors
            tcls.append(c)  # class

        return tcls, tbox, indices, anch
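

# Minimal usage sketch (added example; assumes a YOLOv7-style model whose last
# module is Detect() and which defines model.hyp and model.gr, as elsewhere in
# this repo):
#
#   compute_loss = ComputeLoss(model)               # after model/hyp setup
#   loss, loss_items = compute_loss(pred, targets)  # pred from model(imgs)
#   loss.backward()
#
# targets has shape (num_labels, 6) with rows (image_idx, class, x, y, w, h) in
# normalized xywh; loss_items holds (lbox, lobj, lcls, total) for logging.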

class ComputeLossOTA:
    # Compute losses
    def __init__(self, model, autobalance=False):
        super(ComputeLossOTA, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors', 'stride':
            setattr(self, k, getattr(det, k))

    def __call__(self, p, targets, imgs):  # predictions, targets, images
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)
        pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p]

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                grid = torch.stack([gi, gj], dim=1)
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                #pxy = ps[:, :2].sigmoid() * 3. - 1.
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
                selected_tbox[:, :2] -= grid
                iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                selected_tcls = targets[i][:, 1].long()
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), selected_tcls] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., 4], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets, imgs):
        #indices, anch = self.find_positive(p, targets)
        indices, anch = self.find_3_positive(p, targets)
        #indices, anch = self.find_4_positive(p, targets)
        #indices, anch = self.find_5_positive(p, targets)
        #indices, anch = self.find_9_positive(p, targets)
        device = targets.device  # keep all matching tensors on one device (was hard-coded 'cuda:0')

        matching_bs = [[] for pp in p]
        matching_as = [[] for pp in p]
        matching_gjs = [[] for pp in p]
        matching_gis = [[] for pp in p]
        matching_targets = [[] for pp in p]
        matching_anchs = [[] for pp in p]

        nl = len(p)

        for batch_idx in range(p[0].shape[0]):

            b_idx = targets[:, 0] == batch_idx
            this_target = targets[b_idx]
            if this_target.shape[0] == 0:
                continue

            txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
            txyxy = xywh2xyxy(txywh)

            pxyxys = []
            p_cls = []
            p_obj = []
            from_which_layer = []
            all_b = []
            all_a = []
            all_gj = []
            all_gi = []
            all_anch = []

            for i, pi in enumerate(p):

                b, a, gj, gi = indices[i]
                idx = (b == batch_idx)
                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
                all_b.append(b)
                all_a.append(a)
                all_gj.append(gj)
                all_gi.append(gi)
                all_anch.append(anch[i][idx])
                from_which_layer.append(torch.ones(size=(len(b),), device=device) * i)  # on device so the fg mask below can index it

                fg_pred = pi[b, a, gj, gi]
                p_obj.append(fg_pred[:, 4:5])
                p_cls.append(fg_pred[:, 5:])

                grid = torch.stack([gi, gj], dim=1)
                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  #/ 8.
                #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
                pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  #/ 8.
                pxywh = torch.cat([pxy, pwh], dim=-1)
                pxyxy = xywh2xyxy(pxywh)
                pxyxys.append(pxyxy)

            pxyxys = torch.cat(pxyxys, dim=0)
            if pxyxys.shape[0] == 0:
                continue
            p_obj = torch.cat(p_obj, dim=0)
            p_cls = torch.cat(p_cls, dim=0)
            from_which_layer = torch.cat(from_which_layer, dim=0)
            all_b = torch.cat(all_b, dim=0)
            all_a = torch.cat(all_a, dim=0)
            all_gj = torch.cat(all_gj, dim=0)
            all_gi = torch.cat(all_gi, dim=0)
            all_anch = torch.cat(all_anch, dim=0)

            pair_wise_iou = box_iou(txyxy, pxyxys)

            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)

            top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)

            gt_cls_per_image = (
                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
                .float()
                .unsqueeze(1)
                .repeat(1, pxyxys.shape[0], 1)
            )

            num_gt = this_target.shape[0]
            cls_preds_ = (
                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
            )

            y = cls_preds_.sqrt_()
            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"
            ).sum(-1)
            del cls_preds_

            cost = (
                pair_wise_cls_loss
                + 3.0 * pair_wise_iou_loss
            )

            matching_matrix = torch.zeros_like(cost)

            for gt_idx in range(num_gt):
                _, pos_idx = torch.topk(
                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
                )
                matching_matrix[gt_idx][pos_idx] = 1.0

            del top_k, dynamic_ks
            anchor_matching_gt = matching_matrix.sum(0)
            if (anchor_matching_gt > 1).sum() > 0:
                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

            from_which_layer = from_which_layer[fg_mask_inboxes]
            all_b = all_b[fg_mask_inboxes]
            all_a = all_a[fg_mask_inboxes]
            all_gj = all_gj[fg_mask_inboxes]
            all_gi = all_gi[fg_mask_inboxes]
            all_anch = all_anch[fg_mask_inboxes]

            this_target = this_target[matched_gt_inds]

            for i in range(nl):
                layer_idx = from_which_layer == i
                matching_bs[i].append(all_b[layer_idx])
                matching_as[i].append(all_a[layer_idx])
                matching_gjs[i].append(all_gj[layer_idx])
                matching_gis[i].append(all_gi[layer_idx])
                matching_targets[i].append(this_target[layer_idx])
                matching_anchs[i].append(all_anch[layer_idx])

        for i in range(nl):
            if matching_targets[i] != []:
                matching_bs[i] = torch.cat(matching_bs[i], dim=0)
                matching_as[i] = torch.cat(matching_as[i], dim=0)
                matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
                matching_gis[i] = torch.cat(matching_gis[i], dim=0)
                matching_targets[i] = torch.cat(matching_targets[i], dim=0)
                matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
            else:
                matching_bs[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_as[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_gjs[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_gis[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_targets[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_anchs[i] = torch.tensor([], device=device, dtype=torch.int64)

        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs

    def find_3_positive(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        indices, anch = [], []
        gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            anch.append(anchors[a])  # anchors

        return indices, anch
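

# Usage sketch (added example): same as ComputeLoss, but the OTA variant also
# needs the input images so SimOTA can scale boxes for the matching cost:
#
#   compute_loss_ota = ComputeLossOTA(model)
#   loss, loss_items = compute_loss_ota(pred, targets, imgs)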

class ComputeLossBinOTA:
    # Compute losses
    def __init__(self, model, autobalance=False):
        super(ComputeLossBinOTA, self).__init__()
        device = next(model.parameters()).device  # get model device
        h = model.hyp  # hyperparameters

        # Define criteria
        BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
        BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
        #MSEangle = nn.MSELoss().to(device)

        # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
        self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0))  # positive, negative BCE targets

        # Focal loss
        g = h['fl_gamma']  # focal loss gamma
        if g > 0:
            BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

        det = model.module.model[-1] if is_parallel(model) else model.model[-1]  # Detect() module
        self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02])  # P3-P7
        self.ssi = list(det.stride).index(16) if autobalance else 0  # stride 16 index
        self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
        for k in 'na', 'nc', 'nl', 'anchors', 'stride', 'bin_count':
            setattr(self, k, getattr(det, k))

        #xy_bin_sigmoid = SigmoidBin(bin_count=11, min=-0.5, max=1.5, use_loss_regression=False).to(device)
        wh_bin_sigmoid = SigmoidBin(bin_count=self.bin_count, min=0.0, max=4.0, use_loss_regression=False).to(device)
        #angle_bin_sigmoid = SigmoidBin(bin_count=31, min=-1.1, max=1.1, use_loss_regression=False).to(device)
        self.wh_bin_sigmoid = wh_bin_sigmoid

    def __call__(self, p, targets, imgs):  # predictions, targets, images
        device = targets.device
        lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
        bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)
        pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p]

        # Losses
        for i, pi in enumerate(p):  # layer index, layer predictions
            b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj

            obj_idx = self.wh_bin_sigmoid.get_length() * 2 + 2  # x,y, w-bce, h-bce     # xy_bin_sigmoid.get_length()*2

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                grid = torch.stack([gi, gj], dim=1)
                selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
                selected_tbox[:, :2] -= grid

                #pxy = ps[:, :2].sigmoid() * 2. - 0.5
                ##pxy = ps[:, :2].sigmoid() * 3. - 1.
                #pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                #pbox = torch.cat((pxy, pwh), 1)  # predicted box

                #x_loss, px = xy_bin_sigmoid.training_loss(ps[..., 0:12], tbox[i][..., 0])
                #y_loss, py = xy_bin_sigmoid.training_loss(ps[..., 12:24], tbox[i][..., 1])
                w_loss, pw = self.wh_bin_sigmoid.training_loss(ps[..., 2:(3 + self.bin_count)], selected_tbox[..., 2] / anchors[i][..., 0])
                h_loss, ph = self.wh_bin_sigmoid.training_loss(ps[..., (3 + self.bin_count):obj_idx], selected_tbox[..., 3] / anchors[i][..., 1])

                pw *= anchors[i][..., 0]
                ph *= anchors[i][..., 1]

                px = ps[:, 0].sigmoid() * 2. - 0.5
                py = ps[:, 1].sigmoid() * 2. - 0.5

                lbox += w_loss + h_loss  # + x_loss + y_loss

                #print(f"\n px = {px.shape}, py = {py.shape}, pw = {pw.shape}, ph = {ph.shape} \n")

                pbox = torch.cat((px.unsqueeze(1), py.unsqueeze(1), pw.unsqueeze(1), ph.unsqueeze(1)), 1).to(device)  # predicted box

                iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                selected_tcls = targets[i][:, 1].long()
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, (1 + obj_idx):], self.cn, device=device)  # targets
                    t[range(n), selected_tcls] = self.cp
                    lcls += self.BCEcls(ps[:, (1 + obj_idx):], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            obji = self.BCEobj(pi[..., obj_idx], tobj)
            lobj += obji * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()

    def build_targets(self, p, targets, imgs):
        #indices, anch = self.find_positive(p, targets)
        indices, anch = self.find_3_positive(p, targets)
        #indices, anch = self.find_4_positive(p, targets)
        #indices, anch = self.find_5_positive(p, targets)
        #indices, anch = self.find_9_positive(p, targets)
        device = targets.device  # keep all matching tensors on one device (was hard-coded 'cuda:0')

        matching_bs = [[] for pp in p]
        matching_as = [[] for pp in p]
        matching_gjs = [[] for pp in p]
        matching_gis = [[] for pp in p]
        matching_targets = [[] for pp in p]
        matching_anchs = [[] for pp in p]

        nl = len(p)

        for batch_idx in range(p[0].shape[0]):

            b_idx = targets[:, 0] == batch_idx
            this_target = targets[b_idx]
            if this_target.shape[0] == 0:
                continue

            txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]
            txyxy = xywh2xyxy(txywh)

            pxyxys = []
            p_cls = []
            p_obj = []
            from_which_layer = []
            all_b = []
            all_a = []
            all_gj = []
            all_gi = []
            all_anch = []

            for i, pi in enumerate(p):
                obj_idx = self.wh_bin_sigmoid.get_length() * 2 + 2

                b, a, gj, gi = indices[i]
                idx = (b == batch_idx)
                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
                all_b.append(b)
                all_a.append(a)
                all_gj.append(gj)
                all_gi.append(gi)
                all_anch.append(anch[i][idx])
                from_which_layer.append(torch.ones(size=(len(b),), device=device) * i)  # on device so the fg mask below can index it

                fg_pred = pi[b, a, gj, gi]
                p_obj.append(fg_pred[:, obj_idx:(obj_idx + 1)])
                p_cls.append(fg_pred[:, (obj_idx + 1):])

                grid = torch.stack([gi, gj], dim=1)
                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  #/ 8.
                #pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  #/ 8.
                pw = self.wh_bin_sigmoid.forward(fg_pred[..., 2:(3 + self.bin_count)].sigmoid()) * anch[i][idx][:, 0] * self.stride[i]
                ph = self.wh_bin_sigmoid.forward(fg_pred[..., (3 + self.bin_count):obj_idx].sigmoid()) * anch[i][idx][:, 1] * self.stride[i]

                pxywh = torch.cat([pxy, pw.unsqueeze(1), ph.unsqueeze(1)], dim=-1)
                pxyxy = xywh2xyxy(pxywh)
                pxyxys.append(pxyxy)

            pxyxys = torch.cat(pxyxys, dim=0)
            if pxyxys.shape[0] == 0:
                continue
            p_obj = torch.cat(p_obj, dim=0)
            p_cls = torch.cat(p_cls, dim=0)
            from_which_layer = torch.cat(from_which_layer, dim=0)
            all_b = torch.cat(all_b, dim=0)
            all_a = torch.cat(all_a, dim=0)
            all_gj = torch.cat(all_gj, dim=0)
            all_gi = torch.cat(all_gi, dim=0)
            all_anch = torch.cat(all_anch, dim=0)

            pair_wise_iou = box_iou(txyxy, pxyxys)

            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)

            top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)
            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)

            gt_cls_per_image = (
                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
                .float()
                .unsqueeze(1)
                .repeat(1, pxyxys.shape[0], 1)
            )

            num_gt = this_target.shape[0]
            cls_preds_ = (
                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
            )

            y = cls_preds_.sqrt_()
            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"
            ).sum(-1)
            del cls_preds_

            cost = (
                pair_wise_cls_loss
                + 3.0 * pair_wise_iou_loss
            )

            matching_matrix = torch.zeros_like(cost)

            for gt_idx in range(num_gt):
                _, pos_idx = torch.topk(
                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
                )
                matching_matrix[gt_idx][pos_idx] = 1.0

            del top_k, dynamic_ks
            anchor_matching_gt = matching_matrix.sum(0)
            if (anchor_matching_gt > 1).sum() > 0:
                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

            from_which_layer = from_which_layer[fg_mask_inboxes]
            all_b = all_b[fg_mask_inboxes]
            all_a = all_a[fg_mask_inboxes]
            all_gj = all_gj[fg_mask_inboxes]
            all_gi = all_gi[fg_mask_inboxes]
            all_anch = all_anch[fg_mask_inboxes]

            this_target = this_target[matched_gt_inds]

            for i in range(nl):
                layer_idx = from_which_layer == i
                matching_bs[i].append(all_b[layer_idx])
                matching_as[i].append(all_a[layer_idx])
                matching_gjs[i].append(all_gj[layer_idx])
                matching_gis[i].append(all_gi[layer_idx])
                matching_targets[i].append(this_target[layer_idx])
                matching_anchs[i].append(all_anch[layer_idx])

        for i in range(nl):
            if matching_targets[i] != []:
                matching_bs[i] = torch.cat(matching_bs[i], dim=0)
                matching_as[i] = torch.cat(matching_as[i], dim=0)
                matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
                matching_gis[i] = torch.cat(matching_gis[i], dim=0)
                matching_targets[i] = torch.cat(matching_targets[i], dim=0)
                matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
            else:
                matching_bs[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_as[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_gjs[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_gis[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_targets[i] = torch.tensor([], device=device, dtype=torch.int64)
                matching_anchs[i] = torch.tensor([], device=device, dtype=torch.int64)

        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs

    def find_3_positive(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        indices, anch = [], []
        gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            anch.append(anchors[a])  # anchors

        return indices, anch
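

# Usage sketch (added example): the binned variant expects the Detect() head to
# expose bin_count and the matching channel layout (x, y, w-bins, h-bins, obj, cls):
#
#   compute_loss_bin = ComputeLossBinOTA(model)
#   loss, loss_items = compute_loss_bin(pred, targets, imgs)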
  934. class ComputeLossAuxOTA:
  935. # Compute losses
  936. def __init__(self, model, autobalance=False):
  937. super(ComputeLossAuxOTA, self).__init__()
  938. device = next(model.parameters()).device # get model device
  939. h = model.hyp # hyperparameters
  940. # Define criteria
  941. BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))
  942. BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))
  943. # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
  944. self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets
  945. # Focal loss
  946. g = h['fl_gamma'] # focal loss gamma
  947. if g > 0:
  948. BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)
  949. det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module
  950. self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7
  951. self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index
  952. self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance
  953. for k in 'na', 'nc', 'nl', 'anchors', 'stride':
  954. setattr(self, k, getattr(det, k))
  955. def __call__(self, p, targets, imgs): # predictions, targets, model
  956. device = targets.device
  957. lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)
  958. bs_aux, as_aux_, gjs_aux, gis_aux, targets_aux, anchors_aux = self.build_targets2(p[:self.nl], targets, imgs)
  959. bs, as_, gjs, gis, targets, anchors = self.build_targets(p[:self.nl], targets, imgs)
        pre_gen_gains_aux = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]]  # grid (w, h, w, h) gain per layer
        pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p[:self.nl]]

        # Losses
        for i in range(self.nl):  # layer index, layer predictions
            pi = p[i]  # lead head
            pi_aux = p[i + self.nl]  # auxiliary head
            b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i]  # image, anchor, gridy, gridx
            b_aux, a_aux, gj_aux, gi_aux = bs_aux[i], as_aux_[i], gjs_aux[i], gis_aux[i]  # image, anchor, gridy, gridx
            tobj = torch.zeros_like(pi[..., 0], device=device)  # target obj
            tobj_aux = torch.zeros_like(pi_aux[..., 0], device=device)  # target obj

            n = b.shape[0]  # number of targets
            if n:
                ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets

                # Regression
                grid = torch.stack([gi, gj], dim=1)
                pxy = ps[:, :2].sigmoid() * 2. - 0.5
                pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]
                pbox = torch.cat((pxy, pwh), 1)  # predicted box
                selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]
                selected_tbox[:, :2] -= grid
                iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += (1.0 - iou).mean()  # iou loss

                # Objectness
                tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype)  # iou ratio

                # Classification
                selected_tcls = targets[i][:, 1].long()
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t = torch.full_like(ps[:, 5:], self.cn, device=device)  # targets
                    t[range(n), selected_tcls] = self.cp
                    lcls += self.BCEcls(ps[:, 5:], t)  # BCE

                # Append targets to text file
                # with open('targets.txt', 'a') as file:
                #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

            n_aux = b_aux.shape[0]  # number of targets
            if n_aux:
                ps_aux = pi_aux[b_aux, a_aux, gj_aux, gi_aux]  # prediction subset corresponding to targets
                grid_aux = torch.stack([gi_aux, gj_aux], dim=1)
                pxy_aux = ps_aux[:, :2].sigmoid() * 2. - 0.5
                # pxy_aux = ps_aux[:, :2].sigmoid() * 3. - 1.
                pwh_aux = (ps_aux[:, 2:4].sigmoid() * 2) ** 2 * anchors_aux[i]
                pbox_aux = torch.cat((pxy_aux, pwh_aux), 1)  # predicted box
                selected_tbox_aux = targets_aux[i][:, 2:6] * pre_gen_gains_aux[i]
                selected_tbox_aux[:, :2] -= grid_aux
                iou_aux = bbox_iou(pbox_aux.T, selected_tbox_aux, x1y1x2y2=False, CIoU=True)  # iou(prediction, target)
                lbox += 0.25 * (1.0 - iou_aux).mean()  # iou loss, aux head down-weighted by 0.25

                # Objectness
                tobj_aux[b_aux, a_aux, gj_aux, gi_aux] = (1.0 - self.gr) + self.gr * iou_aux.detach().clamp(0).type(tobj_aux.dtype)  # iou ratio

                # Classification
                selected_tcls_aux = targets_aux[i][:, 1].long()
                if self.nc > 1:  # cls loss (only if multiple classes)
                    t_aux = torch.full_like(ps_aux[:, 5:], self.cn, device=device)  # targets
                    t_aux[range(n_aux), selected_tcls_aux] = self.cp
                    lcls += 0.25 * self.BCEcls(ps_aux[:, 5:], t_aux)  # BCE, aux head down-weighted by 0.25

            obji = self.BCEobj(pi[..., 4], tobj)
            obji_aux = self.BCEobj(pi_aux[..., 4], tobj_aux)
            lobj += obji * self.balance[i] + 0.25 * obji_aux * self.balance[i]  # obj loss
            if self.autobalance:
                self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()

        if self.autobalance:
            self.balance = [x / self.balance[self.ssi] for x in self.balance]
        lbox *= self.hyp['box']
        lobj *= self.hyp['obj']
        lcls *= self.hyp['cls']
        bs = tobj.shape[0]  # batch size

        loss = lbox + lobj + lcls
        return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()
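
    # The two build_targets variants below perform a simOTA-style dynamic
    # assignment on top of the coarse anchor candidates from find_3_positive /
    # find_5_positive: for each image, every ground-truth box claims its k
    # cheapest candidates under a classification + IoU cost, with k derived
    # from the IoU statistics. A minimal sketch of the dynamic-k rule
    # (illustrative numbers, not taken from this file): if one GT box has
    # candidate IoUs [0.82, 0.71, 0.09], then
    #   k = clamp(int(0.82 + 0.71 + 0.09), min=1) = clamp(int(1.62), min=1) = 1
    # so only its single cheapest candidate is kept for that box.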
    def build_targets(self, p, targets, imgs):
        # Dynamic OTA matching for the lead heads: candidate positions from
        # find_3_positive are re-ranked per image by a cls + IoU cost.
        indices, anch = self.find_3_positive(p, targets)

        matching_bs = [[] for pp in p]
        matching_as = [[] for pp in p]
        matching_gjs = [[] for pp in p]
        matching_gis = [[] for pp in p]
        matching_targets = [[] for pp in p]
        matching_anchs = [[] for pp in p]

        nl = len(p)

        for batch_idx in range(p[0].shape[0]):

            b_idx = targets[:, 0] == batch_idx
            this_target = targets[b_idx]
            if this_target.shape[0] == 0:
                continue

            txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]  # scale to pixels
            txyxy = xywh2xyxy(txywh)

            pxyxys = []
            p_cls = []
            p_obj = []
            from_which_layer = []
            all_b = []
            all_a = []
            all_gj = []
            all_gi = []
            all_anch = []

            for i, pi in enumerate(p):

                b, a, gj, gi = indices[i]
                idx = (b == batch_idx)
                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
                all_b.append(b)
                all_a.append(a)
                all_gj.append(gj)
                all_gi.append(gi)
                all_anch.append(anch[i][idx])
                # Keep the layer-id tensor on the same device as the masks it is indexed with below
                from_which_layer.append(torch.ones(size=(len(b),), device=targets.device) * i)

                fg_pred = pi[b, a, gj, gi]
                p_obj.append(fg_pred[:, 4:5])
                p_cls.append(fg_pred[:, 5:])

                grid = torch.stack([gi, gj], dim=1)
                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
                # pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
                pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
                pxywh = torch.cat([pxy, pwh], dim=-1)
                pxyxy = xywh2xyxy(pxywh)
                pxyxys.append(pxyxy)

            pxyxys = torch.cat(pxyxys, dim=0)
            if pxyxys.shape[0] == 0:
                continue

            p_obj = torch.cat(p_obj, dim=0)
            p_cls = torch.cat(p_cls, dim=0)
            from_which_layer = torch.cat(from_which_layer, dim=0)
            all_b = torch.cat(all_b, dim=0)
            all_a = torch.cat(all_a, dim=0)
            all_gj = torch.cat(all_gj, dim=0)
            all_gi = torch.cat(all_gi, dim=0)
            all_anch = torch.cat(all_anch, dim=0)

            pair_wise_iou = box_iou(txyxy, pxyxys)
            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)

            # Dynamic k: the summed top-k IoUs estimate how many candidates each GT should claim
            top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1)
            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)

            gt_cls_per_image = (
                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
                .float()
                .unsqueeze(1)
                .repeat(1, pxyxys.shape[0], 1)
            )

            num_gt = this_target.shape[0]
            cls_preds_ = (
                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
            )

            y = cls_preds_.sqrt_()  # joint cls*obj score in (0, 1)
            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"  # de-sigmoid back to logits
            ).sum(-1)
            del cls_preds_

            cost = (
                pair_wise_cls_loss
                + 3.0 * pair_wise_iou_loss
            )

            matching_matrix = torch.zeros_like(cost)

            for gt_idx in range(num_gt):
                _, pos_idx = torch.topk(
                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
                )
                matching_matrix[gt_idx][pos_idx] = 1.0

            del top_k, dynamic_ks

            # Resolve candidates claimed by multiple GTs: keep only the cheapest GT
            anchor_matching_gt = matching_matrix.sum(0)
            if (anchor_matching_gt > 1).sum() > 0:
                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

            from_which_layer = from_which_layer[fg_mask_inboxes]
            all_b = all_b[fg_mask_inboxes]
            all_a = all_a[fg_mask_inboxes]
            all_gj = all_gj[fg_mask_inboxes]
            all_gi = all_gi[fg_mask_inboxes]
            all_anch = all_anch[fg_mask_inboxes]

            this_target = this_target[matched_gt_inds]

            for i in range(nl):
                layer_idx = from_which_layer == i
                matching_bs[i].append(all_b[layer_idx])
                matching_as[i].append(all_a[layer_idx])
                matching_gjs[i].append(all_gj[layer_idx])
                matching_gis[i].append(all_gi[layer_idx])
                matching_targets[i].append(this_target[layer_idx])
                matching_anchs[i].append(all_anch[layer_idx])

        for i in range(nl):
            if matching_targets[i] != []:
                matching_bs[i] = torch.cat(matching_bs[i], dim=0)
                matching_as[i] = torch.cat(matching_as[i], dim=0)
                matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
                matching_gis[i] = torch.cat(matching_gis[i], dim=0)
                matching_targets[i] = torch.cat(matching_targets[i], dim=0)
                matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
            else:
                # Empty placeholders; derive the device from targets rather than hard-coding 'cuda:0'
                matching_bs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_as[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_gjs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_gis[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_targets[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_anchs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)

        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs
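
    # build_targets2 mirrors build_targets; the functional difference is that
    # its candidates come from find_5_positive (bias g = 1.0) rather than
    # find_3_positive (g = 0.5), so the auxiliary heads train on a denser,
    # more permissive set of positives than the lead heads. A hypothetical
    # refactor could share the common body, e.g.:
    #   def build_targets2(self, p, targets, imgs):
    #       return self._ota_match(p, targets, imgs, self.find_5_positive)
    # where _ota_match is an assumed helper, not something defined in this file.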
    def build_targets2(self, p, targets, imgs):
        # Same dynamic OTA matching as build_targets above, but the candidate
        # positions come from find_5_positive (used for the auxiliary heads).
        indices, anch = self.find_5_positive(p, targets)

        matching_bs = [[] for pp in p]
        matching_as = [[] for pp in p]
        matching_gjs = [[] for pp in p]
        matching_gis = [[] for pp in p]
        matching_targets = [[] for pp in p]
        matching_anchs = [[] for pp in p]

        nl = len(p)

        for batch_idx in range(p[0].shape[0]):

            b_idx = targets[:, 0] == batch_idx
            this_target = targets[b_idx]
            if this_target.shape[0] == 0:
                continue

            txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]  # scale to pixels
            txyxy = xywh2xyxy(txywh)

            pxyxys = []
            p_cls = []
            p_obj = []
            from_which_layer = []
            all_b = []
            all_a = []
            all_gj = []
            all_gi = []
            all_anch = []

            for i, pi in enumerate(p):

                b, a, gj, gi = indices[i]
                idx = (b == batch_idx)
                b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx]
                all_b.append(b)
                all_a.append(a)
                all_gj.append(gj)
                all_gi.append(gi)
                all_anch.append(anch[i][idx])
                # Keep the layer-id tensor on the same device as the masks it is indexed with below
                from_which_layer.append(torch.ones(size=(len(b),), device=targets.device) * i)

                fg_pred = pi[b, a, gj, gi]
                p_obj.append(fg_pred[:, 4:5])
                p_cls.append(fg_pred[:, 5:])

                grid = torch.stack([gi, gj], dim=1)
                pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i]  # / 8.
                # pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. + grid) * self.stride[i]
                pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i]  # / 8.
                pxywh = torch.cat([pxy, pwh], dim=-1)
                pxyxy = xywh2xyxy(pxywh)
                pxyxys.append(pxyxy)

            pxyxys = torch.cat(pxyxys, dim=0)
            if pxyxys.shape[0] == 0:
                continue

            p_obj = torch.cat(p_obj, dim=0)
            p_cls = torch.cat(p_cls, dim=0)
            from_which_layer = torch.cat(from_which_layer, dim=0)
            all_b = torch.cat(all_b, dim=0)
            all_a = torch.cat(all_a, dim=0)
            all_gj = torch.cat(all_gj, dim=0)
            all_gi = torch.cat(all_gi, dim=0)
            all_anch = torch.cat(all_anch, dim=0)

            pair_wise_iou = box_iou(txyxy, pxyxys)
            pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)

            # Dynamic k: the summed top-k IoUs estimate how many candidates each GT should claim
            top_k, _ = torch.topk(pair_wise_iou, min(20, pair_wise_iou.shape[1]), dim=1)
            dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)

            gt_cls_per_image = (
                F.one_hot(this_target[:, 1].to(torch.int64), self.nc)
                .float()
                .unsqueeze(1)
                .repeat(1, pxyxys.shape[0], 1)
            )

            num_gt = this_target.shape[0]
            cls_preds_ = (
                p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
                * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()
            )

            y = cls_preds_.sqrt_()  # joint cls*obj score in (0, 1)
            pair_wise_cls_loss = F.binary_cross_entropy_with_logits(
                torch.log(y / (1 - y)), gt_cls_per_image, reduction="none"  # de-sigmoid back to logits
            ).sum(-1)
            del cls_preds_

            cost = (
                pair_wise_cls_loss
                + 3.0 * pair_wise_iou_loss
            )

            matching_matrix = torch.zeros_like(cost)

            for gt_idx in range(num_gt):
                _, pos_idx = torch.topk(
                    cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
                )
                matching_matrix[gt_idx][pos_idx] = 1.0

            del top_k, dynamic_ks

            # Resolve candidates claimed by multiple GTs: keep only the cheapest GT
            anchor_matching_gt = matching_matrix.sum(0)
            if (anchor_matching_gt > 1).sum() > 0:
                _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)
                matching_matrix[:, anchor_matching_gt > 1] *= 0.0
                matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0
            fg_mask_inboxes = matching_matrix.sum(0) > 0.0
            matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)

            from_which_layer = from_which_layer[fg_mask_inboxes]
            all_b = all_b[fg_mask_inboxes]
            all_a = all_a[fg_mask_inboxes]
            all_gj = all_gj[fg_mask_inboxes]
            all_gi = all_gi[fg_mask_inboxes]
            all_anch = all_anch[fg_mask_inboxes]

            this_target = this_target[matched_gt_inds]

            for i in range(nl):
                layer_idx = from_which_layer == i
                matching_bs[i].append(all_b[layer_idx])
                matching_as[i].append(all_a[layer_idx])
                matching_gjs[i].append(all_gj[layer_idx])
                matching_gis[i].append(all_gi[layer_idx])
                matching_targets[i].append(this_target[layer_idx])
                matching_anchs[i].append(all_anch[layer_idx])

        for i in range(nl):
            if matching_targets[i] != []:
                matching_bs[i] = torch.cat(matching_bs[i], dim=0)
                matching_as[i] = torch.cat(matching_as[i], dim=0)
                matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)
                matching_gis[i] = torch.cat(matching_gis[i], dim=0)
                matching_targets[i] = torch.cat(matching_targets[i], dim=0)
                matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)
            else:
                # Empty placeholders; derive the device from targets rather than hard-coding 'cuda:0'
                matching_bs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_as[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_gjs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_gis[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_targets[i] = torch.tensor([], device=targets.device, dtype=torch.int64)
                matching_anchs[i] = torch.tensor([], device=targets.device, dtype=torch.int64)

        return matching_bs, matching_as, matching_gjs, matching_gis, matching_targets, matching_anchs
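
    # find_5_positive / find_3_positive select, per target and per layer, the
    # grid cell containing the box centre plus nearby neighbour cells, giving
    # several candidate positions for the OTA matching above. Worked example
    # with g = 0.5 (numbers assumed for illustration): a centre at grid
    # xy = (12.3, 45.7) has fractional parts (0.3, 0.7), so besides cell
    # (12, 45) the left neighbour (11, 45) is taken (x % 1 = 0.3 < 0.5) and,
    # via the inverse coordinates, the neighbour below (12, 46)
    # ((gain_y - 45.7) % 1 = 0.3 < 0.5) -- three positives in total. With
    # g = 1.0 all four neighbour tests pass for in-bounds centres, yielding
    # the centre cell plus four neighbours -- five positives.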
    def find_5_positive(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        indices, anch = [], []
        gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 1.0  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            anch.append(anchors[a])  # anchors

        return indices, anch
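
    # find_3_positive is identical to find_5_positive except for the bias
    # g = 0.5 (which also scales the offsets): only the neighbour cells
    # closest to the box centre qualify (subject to the border checks), so
    # each target contributes at most three candidate positions per layer
    # instead of five.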
    def find_3_positive(self, p, targets):
        # Build targets for compute_loss(), input targets(image,class,x,y,w,h)
        na, nt = self.na, targets.shape[0]  # number of anchors, targets
        indices, anch = [], []
        gain = torch.ones(7, device=targets.device).long()  # normalized to gridspace gain
        ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt)  # same as .repeat_interleave(nt)
        targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2)  # append anchor indices

        g = 0.5  # bias
        off = torch.tensor([[0, 0],
                            [1, 0], [0, 1], [-1, 0], [0, -1],  # j,k,l,m
                            # [1, 1], [1, -1], [-1, 1], [-1, -1],  # jk,jm,lk,lm
                            ], device=targets.device).float() * g  # offsets

        for i in range(self.nl):
            anchors = self.anchors[i]
            gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain

            # Match targets to anchors
            t = targets * gain
            if nt:
                # Matches
                r = t[:, :, 4:6] / anchors[:, None]  # wh ratio
                j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t']  # compare
                # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t']  # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))
                t = t[j]  # filter

                # Offsets
                gxy = t[:, 2:4]  # grid xy
                gxi = gain[[2, 3]] - gxy  # inverse
                j, k = ((gxy % 1. < g) & (gxy > 1.)).T
                l, m = ((gxi % 1. < g) & (gxi > 1.)).T
                j = torch.stack((torch.ones_like(j), j, k, l, m))
                t = t.repeat((5, 1, 1))[j]
                offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]
            else:
                t = targets[0]
                offsets = 0

            # Define
            b, c = t[:, :2].long().T  # image, class
            gxy = t[:, 2:4]  # grid xy
            gwh = t[:, 4:6]  # grid wh
            gij = (gxy - offsets).long()
            gi, gj = gij.T  # grid xy indices

            # Append
            a = t[:, 6].long()  # anchor indices
            indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1)))  # image, anchor, grid indices
            anch.append(anchors[a])  # anchors

        return indices, anch
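
# Minimal usage sketch (assumptions: the enclosing class is constructed from a
# YOLO model as elsewhere in this file, e.g. ComputeLossAuxOTA(model); `imgs`
# is a batch tensor and `targets` an (n, 6) tensor of
# [image_idx, class, x, y, w, h] normalized to [0, 1] -- none of these objects
# are defined here):
#
#   compute_loss = ComputeLossAuxOTA(model)
#   pred = model(imgs)  # list of per-layer predictions (lead + aux heads)
#   loss, loss_items = compute_loss(pred, targets, imgs)  # loss is scaled by batch size
#   loss.backward()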