# nets.py

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init

from .box_utils import Detect, PriorBox

class L2Norm(nn.Module):
    """Channel-wise L2 normalization with a learnable per-channel scale,
    applied to the shallow VGG feature maps in S3FD-style detectors."""

    def __init__(self, n_channels, scale):
        super(L2Norm, self).__init__()
        self.n_channels = n_channels
        self.gamma = scale
        self.eps = 1e-10
        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
        self.reset_parameters()

    def reset_parameters(self):
        init.constant_(self.weight, self.gamma)

    def forward(self, x):
        # Normalize each spatial location to unit L2 norm across channels,
        # then rescale with the learned per-channel weight.
        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
        x = torch.div(x, norm)
        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
        return out
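
# Illustrative check (an assumption-free consequence of the code above): at
# init, L2Norm(256, 10) maps x = torch.ones(1, 256, 4, 4) to 10 / sqrt(256)
# = 0.625 in every channel, since each spatial location is normalized to
# unit L2 norm across the 256 channels and then scaled by gamma = 10.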

class S3FDNet(nn.Module):

    def __init__(self, device='cuda'):
        super(S3FDNet, self).__init__()
        self.device = device

        # VGG16 backbone through conv5_3, followed by the dilated
        # fc6/fc7 convolutions (conv6/conv7), as in SSD.
        self.vgg = nn.ModuleList([
            # conv1
            nn.Conv2d(3, 64, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            # conv2
            nn.Conv2d(64, 128, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            # conv3
            nn.Conv2d(128, 256, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2, ceil_mode=True),
            # conv4
            nn.Conv2d(256, 512, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            # conv5
            nn.Conv2d(512, 512, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, 3, 1, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2, 2),
            # conv6 (dilated fc6) and conv7 (fc7)
            nn.Conv2d(512, 1024, 3, 1, padding=6, dilation=6),
            nn.ReLU(inplace=True),
            nn.Conv2d(1024, 1024, 1, 1),
            nn.ReLU(inplace=True),
        ])

        # L2 normalization for the three shallow detection sources.
        self.L2Norm3_3 = L2Norm(256, 10)
        self.L2Norm4_3 = L2Norm(512, 8)
        self.L2Norm5_3 = L2Norm(512, 5)

        # Extra layers on top of conv7, producing the two deepest
        # detection sources.
        self.extras = nn.ModuleList([
            nn.Conv2d(1024, 256, 1, 1),
            nn.Conv2d(256, 512, 3, 2, padding=1),
            nn.Conv2d(512, 128, 1, 1),
            nn.Conv2d(128, 256, 3, 2, padding=1),
        ])

        # Per-source localization heads (4 box offsets each).
        self.loc = nn.ModuleList([
            nn.Conv2d(256, 4, 3, 1, padding=1),
            nn.Conv2d(512, 4, 3, 1, padding=1),
            nn.Conv2d(512, 4, 3, 1, padding=1),
            nn.Conv2d(1024, 4, 3, 1, padding=1),
            nn.Conv2d(512, 4, 3, 1, padding=1),
            nn.Conv2d(256, 4, 3, 1, padding=1),
        ])

        # Per-source classification heads. The first source predicts
        # 4 channels (3 background + 1 face) for the max-out background
        # label; the rest predict the usual 2 (background, face).
        self.conf = nn.ModuleList([
            nn.Conv2d(256, 4, 3, 1, padding=1),
            nn.Conv2d(512, 2, 3, 1, padding=1),
            nn.Conv2d(512, 2, 3, 1, padding=1),
            nn.Conv2d(1024, 2, 3, 1, padding=1),
            nn.Conv2d(512, 2, 3, 1, padding=1),
            nn.Conv2d(256, 2, 3, 1, padding=1),
        ])

        self.softmax = nn.Softmax(dim=-1)
        self.detect = Detect()
    def forward(self, x):
        size = x.size()[2:]
        sources = list()
        loc = list()
        conf = list()

        # Run the backbone and collect the six detection sources; the
        # three shallow sources are L2-normalized before the heads.
        for k in range(16):
            x = self.vgg[k](x)
        s = self.L2Norm3_3(x)
        sources.append(s)

        for k in range(16, 23):
            x = self.vgg[k](x)
        s = self.L2Norm4_3(x)
        sources.append(s)

        for k in range(23, 30):
            x = self.vgg[k](x)
        s = self.L2Norm5_3(x)
        sources.append(s)

        for k in range(30, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)

        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)

        # Apply the multibox head to the source layers. On the first
        # source, collapse the three background channels to their maximum
        # (max-out background label) so every source ends up with two
        # class scores.
        loc_x = self.loc[0](sources[0])
        conf_x = self.conf[0](sources[0])

        max_conf, _ = torch.max(conf_x[:, 0:3, :, :], dim=1, keepdim=True)
        conf_x = torch.cat((max_conf, conf_x[:, 3:, :, :]), dim=1)

        loc.append(loc_x.permute(0, 2, 3, 1).contiguous())
        conf.append(conf_x.permute(0, 2, 3, 1).contiguous())

        for i in range(1, len(sources)):
            x = sources[i]
            loc.append(self.loc[i](x).permute(0, 2, 3, 1).contiguous())
            conf.append(self.conf[i](x).permute(0, 2, 3, 1).contiguous())

        # Record each source's spatial size (H, W) for prior generation.
        feature_maps = []
        for i in range(len(loc)):
            feature_maps.append([loc[i].size(1), loc[i].size(2)])

        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

        # Generate priors for this input size, then decode detections.
        with torch.no_grad():
            self.priorbox = PriorBox(size, feature_maps)
            self.priors = self.priorbox.forward()

        output = self.detect.forward(
            loc.view(loc.size(0), -1, 4),
            self.softmax(conf.view(conf.size(0), -1, 2)),
            self.priors.type_as(x).to(self.device),
        )
        return output
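
# A minimal smoke-test sketch, not part of the original module. It assumes
# .box_utils provides Detect and PriorBox as imported above, and that Detect
# returns a tensor of decoded detections, as in common S3FD ports; the
# device falls back to CPU when CUDA is unavailable.
if __name__ == '__main__':
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    net = S3FDNet(device=device).to(device).eval()
    dummy = torch.randn(1, 3, 256, 256, device=device)
    with torch.no_grad():
        detections = net(dummy)
    print(detections.shape)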