# YOLOR general utils

import glob
import logging
import math
import os
import platform
import random
import re
import subprocess
import time
from pathlib import Path

import cv2
import numpy as np
import pandas as pd
import torch
import torchvision
import yaml

from utils.google_utils import gsutil_getsize
from utils.metrics import fitness
from utils.torch_utils import init_torch_seeds

# Settings
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
pd.options.display.max_columns = 10
cv2.setNumThreads(0)  # prevent OpenCV from multithreading (incompatible with PyTorch DataLoader)
os.environ['NUMEXPR_MAX_THREADS'] = str(min(os.cpu_count(), 8))  # NumExpr max threads


def set_logging(rank=-1):
    logging.basicConfig(
        format="%(message)s",
        level=logging.INFO if rank in [-1, 0] else logging.WARN)


def init_seeds(seed=0):
    # Initialize random number generator (RNG) seeds
    random.seed(seed)
    np.random.seed(seed)
    init_torch_seeds(seed)


def get_latest_run(search_dir='.'):
    # Return path to most recent 'last.pt' in /runs (i.e. to --resume from)
    last_list = glob.glob(f'{search_dir}/**/last*.pt', recursive=True)
    return max(last_list, key=os.path.getctime) if last_list else ''


def isdocker():
    # Is environment a Docker container
    return Path('/workspace').exists()  # or Path('/.dockerenv').exists()


def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


def check_online():
    # Check internet connectivity
    import socket
    try:
        socket.create_connection(("1.1.1.1", 443), 5)  # check host accessibility
        return True
    except OSError:
        return False


def check_git_status():
    # Recommend 'git pull' if code is out of date
    print(colorstr('github: '), end='')
    try:
        assert Path('.git').exists(), 'skipping check (not a git repository)'
        assert not isdocker(), 'skipping check (Docker image)'
        assert check_online(), 'skipping check (offline)'

        cmd = 'git fetch && git config --get remote.origin.url'
        url = subprocess.check_output(cmd, shell=True).decode().strip()  # github repo url
        url = url[:-len('.git')] if url.endswith('.git') else url  # remove '.git' suffix (rstrip would strip chars, not the suffix)
        branch = subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True).decode().strip()  # checked out
        n = int(subprocess.check_output(f'git rev-list {branch}..origin/master --count', shell=True))  # commits behind
        if n > 0:
            s = f"⚠️ WARNING: code is out of date by {n} commit{'s' * (n > 1)}. " \
                f"Use 'git pull' to update or 'git clone {url}' to download latest."
        else:
            s = f'up to date with {url} ✅'
        print(emojis(s))  # emoji-safe
    except Exception as e:
        print(e)


def check_requirements(requirements='requirements.txt', exclude=()):
    # Check installed dependencies meet requirements (pass *.txt file or list of packages)
    import pkg_resources as pkg
    prefix = colorstr('red', 'bold', 'requirements:')
    if isinstance(requirements, (str, Path)):  # requirements.txt file
        file = Path(requirements)
        if not file.exists():
            print(f"{prefix} {file.resolve()} not found, check failed.")
            return
        requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(file.open()) if x.name not in exclude]
    else:  # list or tuple of packages
        requirements = [x for x in requirements if x not in exclude]

    n = 0  # number of packages updated
    for r in requirements:
        try:
            pkg.require(r)
        except Exception as e:  # DistributionNotFound or VersionConflict if requirements not met
            n += 1
            print(f"{prefix} {e.req} not found and is required by YOLOR, attempting auto-update...")
            print(subprocess.check_output(f"pip install '{e.req}'", shell=True).decode())

    if n:  # if packages updated
        source = file.resolve() if 'file' in locals() else requirements
        s = f"{prefix} {n} package{'s' * (n > 1)} updated per {source}\n" \
            f"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\n"
        print(emojis(s))  # emoji-safe


def check_img_size(img_size, s=32):
    # Verify img_size is a multiple of stride s
    new_size = make_divisible(img_size, int(s))  # ceil gs-multiple
    if new_size != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, new_size))
    return new_size


def check_imshow():
    # Check if environment supports image displays
    try:
        assert not isdocker(), 'cv2.imshow() is disabled in Docker environments'
        cv2.imshow('test', np.zeros((1, 1, 3)))
        cv2.waitKey(1)
        cv2.destroyAllWindows()
        cv2.waitKey(1)
        return True
    except Exception as e:
        print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\n{e}')
        return False


def check_file(file):
    # Search for file if not found
    if Path(file).is_file() or file == '':
        return file
    else:
        files = glob.glob('./**/' + file, recursive=True)  # find file
        assert len(files), f'File Not Found: {file}'  # assert file was found
        assert len(files) == 1, f"Multiple files match '{file}', specify exact path: {files}"  # assert unique
        return files[0]  # return file


def check_dataset(dict):
    # Download dataset if not found locally
    val, s = dict.get('val'), dict.get('download')
    if val and len(val):
        val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])]  # val path
        if not all(x.exists() for x in val):
            print('\nWARNING: Dataset not found, nonexistent paths: %s' % [str(x) for x in val if not x.exists()])
            if s and len(s):  # download script
                print('Downloading %s ...' % s)
                if s.startswith('http') and s.endswith('.zip'):  # URL
                    f = Path(s).name  # filename
                    torch.hub.download_url_to_file(s, f)
                    r = os.system('unzip -q %s -d ../ && rm %s' % (f, f))  # unzip
                else:  # bash script
                    r = os.system(s)
                print('Dataset autodownload %s\n' % ('success' if r == 0 else 'failure'))  # analyze return value
            else:
                raise Exception('Dataset not found.')


def make_divisible(x, divisor):
    # Returns x evenly divisible by divisor
    return math.ceil(x / divisor) * divisor
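
# Illustrative example (not part of the original file): make_divisible rounds up,
# so an image size of 100 with stride 32 becomes 128.
#   >>> make_divisible(100, 32)
#   128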


def clean_str(s):
    # Cleans a string by replacing special characters with underscore _
    return re.sub(pattern="[|@#!¡·$€%&()=?¿^*;:,¨´><+]", repl="_", string=s)


def one_cycle(y1=0.0, y2=1.0, steps=100):
    # lambda function for sinusoidal ramp from y1 to y2
    return lambda x: ((1 - math.cos(x * math.pi / steps)) / 2) * (y2 - y1) + y1
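
# Usage sketch (illustrative; `optimizer` and the hyperparameter values are assumptions):
#   lf = one_cycle(1.0, 0.2, 300)  # multiplier ramps 1.0 -> 0.2 over 300 steps; lf(150) ~= 0.6
#   scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)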


def colorstr(*input):
    # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')
    *args, string = input if len(input) > 1 else ('blue', 'bold', input[0])  # color arguments, string
    colors = {'black': '\033[30m',  # basic colors
              'red': '\033[31m',
              'green': '\033[32m',
              'yellow': '\033[33m',
              'blue': '\033[34m',
              'magenta': '\033[35m',
              'cyan': '\033[36m',
              'white': '\033[37m',
              'bright_black': '\033[90m',  # bright colors
              'bright_red': '\033[91m',
              'bright_green': '\033[92m',
              'bright_yellow': '\033[93m',
              'bright_blue': '\033[94m',
              'bright_magenta': '\033[95m',
              'bright_cyan': '\033[96m',
              'bright_white': '\033[97m',
              'end': '\033[0m',  # misc
              'bold': '\033[1m',
              'underline': '\033[4m'}
    return ''.join(colors[x] for x in args) + f'{string}' + colors['end']
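
# Illustrative examples (not in the original file): with a single argument, colorstr
# defaults to blue + bold; otherwise the last argument is the string to color.
#   >>> colorstr('hello')                    # '\033[34m\033[1mhello\033[0m'
#   >>> colorstr('red', 'bold', 'warning')   # red bold text, reset at the end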


def labels_to_class_weights(labels, nc=80):
    # Get class weights (inverse frequency) from training labels
    if labels[0] is None:  # no labels loaded
        return torch.Tensor()

    labels = np.concatenate(labels, 0)  # labels.shape = (866643, 5) for COCO
    classes = labels[:, 0].astype(int)  # labels = [class xywh]; builtin int (np.int is deprecated)
    weights = np.bincount(classes, minlength=nc)  # occurrences per class

    # Prepend gridpoint count (for uCE training)
    # gpi = ((320 / 32 * np.array([1, 2, 4])) ** 2 * 3).sum()  # gridpoints per image
    # weights = np.hstack([gpi * len(labels) - weights.sum() * 9, weights * 9]) ** 0.5  # prepend gridpoints to start

    weights[weights == 0] = 1  # replace empty bins with 1
    weights = 1 / weights  # number of targets per class
    weights /= weights.sum()  # normalize
    return torch.from_numpy(weights)


def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    # Produces image weights based on class_weights and image contents
    class_counts = np.array([np.bincount(x[:, 0].astype(int), minlength=nc) for x in labels])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    # index = random.choices(range(n), weights=image_weights, k=1)  # weight image sample
    return image_weights


def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\n')
    # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\n')
    # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)]  # darknet to coco
    # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)]  # coco to darknet
    x = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,
         35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
         64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]
    return x


def xyxy2xywh(x):
    # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]  # width
    y[:, 3] = x[:, 3] - x[:, 1]  # height
    return y


def xywh2xyxy(x):
    # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
    y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
    y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
    y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
    return y
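
# Round-trip sketch (illustrative values): the two converters are inverses of each other.
#   >>> b = np.array([[10., 20., 30., 60.]])  # xyxy: top-left (10, 20), bottom-right (30, 60)
#   >>> xyxy2xywh(b)                          # [[20., 40., 20., 40.]] -> center (20, 40), w 20, h 40
#   >>> xywh2xyxy(xyxy2xywh(b))               # back to [[10., 20., 30., 60.]]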


def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):
    # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw  # top left x
    y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh  # top left y
    y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw  # bottom right x
    y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh  # bottom right y
    return y


def xyn2xy(x, w=640, h=640, padw=0, padh=0):
    # Convert normalized segments into pixel segments, shape (n,2)
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[:, 0] = w * x[:, 0] + padw  # top left x
    y[:, 1] = h * x[:, 1] + padh  # top left y
    return y


def segment2box(segment, width=640, height=640):
    # Convert 1 segment label to 1 box label, applying inside-image constraint, i.e. (xy1, xy2, ...) to (xyxy)
    x, y = segment.T  # segment xy
    inside = (x >= 0) & (y >= 0) & (x <= width) & (y <= height)
    x, y = x[inside], y[inside]
    return np.array([x.min(), y.min(), x.max(), y.max()]) if any(x) else np.zeros((1, 4))  # xyxy


def segments2boxes(segments):
    # Convert segment labels to box labels, i.e. (cls, xy1, xy2, ...) to (cls, xywh)
    boxes = []
    for s in segments:
        x, y = s.T  # segment xy
        boxes.append([x.min(), y.min(), x.max(), y.max()])  # cls, xyxy
    return xyxy2xywh(np.array(boxes))  # cls, xywh


def resample_segments(segments, n=1000):
    # Up-sample an (n,2) segment
    for i, s in enumerate(segments):
        s = np.concatenate((s, s[0:1, :]), axis=0)  # close the segment by repeating the first point
        x = np.linspace(0, len(s) - 1, n)
        xp = np.arange(len(s))
        segments[i] = np.concatenate([np.interp(x, xp, s[:, j]) for j in range(2)]).reshape(2, -1).T  # segment xy
    return segments
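
# Sketch (illustrative): a 3-point triangle is closed and resampled to n evenly spaced points.
#   >>> tri = [np.array([[0., 0.], [4., 0.], [0., 4.]])]
#   >>> resample_segments(tri, n=100)[0].shape
#   (100, 2)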


def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # gain = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords


def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to image shape (height, width)
    boxes[:, 0].clamp_(0, img_shape[1])  # x1
    boxes[:, 1].clamp_(0, img_shape[0])  # y1
    boxes[:, 2].clamp_(0, img_shape[1])  # x2
    boxes[:, 3].clamp_(0, img_shape[0])  # y2


def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):
    # Returns the IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    iou = inter / union
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = cw ** 2 + ch ** 2 + eps  # convex diagonal squared
            rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 +
                    (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4  # center distance squared
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / (h2 + eps)) - torch.atan(w1 / (h1 + eps)), 2)
                with torch.no_grad():
                    alpha = v / (v - iou + (1 + eps))
                return iou - (rho2 / c2 + v * alpha)  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            c_area = cw * ch + eps  # convex area
            return iou - (c_area - union) / c_area  # GIoU
    else:
        return iou  # IoU
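
# Numeric sketch (illustrative values): one 4-vector box against an nx4 set.
#   >>> b1 = torch.tensor([0., 0., 10., 10.])
#   >>> b2 = torch.tensor([[5., 5., 15., 15.]])
#   >>> bbox_iou(b1, b2)              # plain IoU: 25 / 175 ~= 0.1429
#   >>> bbox_iou(b1, b2, CIoU=True)   # IoU minus center-distance and aspect-ratio penalties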


def bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9):
    # Returns the alpha-IoU of box1 to box2. box1 is 4, box2 is nx4
    box2 = box2.T

    # Get the coordinates of bounding boxes
    if x1y1x2y2:  # x1, y1, x2, y2 = box1
        b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
        b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
    else:  # transform from xywh to xyxy
        b1_x1, b1_x2 = box1[0] - box1[2] / 2, box1[0] + box1[2] / 2
        b1_y1, b1_y2 = box1[1] - box1[3] / 2, box1[1] + box1[3] / 2
        b2_x1, b2_x2 = box2[0] - box2[2] / 2, box2[0] + box2[2] / 2
        b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2

    # Intersection area
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)

    # Union Area
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
    union = w1 * h1 + w2 * h2 - inter + eps

    # change iou into pow(iou + eps, alpha)
    # iou = inter / union
    iou = torch.pow(inter / union + eps, alpha)
    # beta = 2 * alpha
    if GIoU or DIoU or CIoU:
        cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)  # convex (smallest enclosing box) width
        ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)  # convex height
        if CIoU or DIoU:  # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
            c2 = (cw ** 2 + ch ** 2) ** alpha + eps  # convex diagonal
            rho_x = torch.abs(b2_x1 + b2_x2 - b1_x1 - b1_x2)
            rho_y = torch.abs(b2_y1 + b2_y2 - b1_y1 - b1_y2)
            rho2 = ((rho_x ** 2 + rho_y ** 2) / 4) ** alpha  # center distance
            if DIoU:
                return iou - rho2 / c2  # DIoU
            elif CIoU:  # https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
                v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
                with torch.no_grad():
                    alpha_ciou = v / ((1 + eps) - inter / union + v)
                # return iou - (rho2 / c2 + v * alpha_ciou)  # CIoU
                return iou - (rho2 / c2 + torch.pow(v * alpha_ciou + eps, alpha))  # CIoU
        else:  # GIoU https://arxiv.org/pdf/1902.09630.pdf
            # c_area = cw * ch + eps  # convex area
            # return iou - (c_area - union) / c_area  # GIoU
            c_area = torch.max(cw * ch + eps, union)  # convex area
            return iou - torch.pow((c_area - union) / c_area + eps, alpha)  # GIoU
    else:
        return iou  # torch.log(iou+eps) or iou


def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)
    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    return inter / (area1[:, None] + area2 - inter)  # iou = inter / (area1 + area2 - inter)
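
# Pairwise sketch (illustrative values): N=2 boxes vs. M=1 box yields a 2x1 matrix.
#   >>> a = torch.tensor([[0., 0., 10., 10.], [0., 0., 5., 5.]])
#   >>> b = torch.tensor([[0., 0., 10., 10.]])
#   >>> box_iou(a, b)   # tensor([[1.0000], [0.2500]])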


def wh_iou(wh1, wh2):
    # Returns the nxm IoU matrix. wh1 is nx2, wh2 is mx2
    wh1 = wh1[:, None]  # [N,1,2]
    wh2 = wh2[None]  # [1,M,2]
    inter = torch.min(wh1, wh2).prod(2)  # [N,M]
    return inter / (wh1.prod(2) + wh2.prod(2) - inter)  # iou = inter / (area1 + area2 - inter)


def box_giou(box1, box2):
    """
    Return generalized intersection-over-union (Jaccard index) between two sets of boxes.
    Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
    ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
    Args:
        box1 (Tensor[N, 4]): first set of boxes
        box2 (Tensor[M, 4]): second set of boxes
    Returns:
        Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values
        for every element in box1 and box2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    union = area1[:, None] + area2 - inter
    iou = inter / union

    lti = torch.min(box1[:, None, :2], box2[:, :2])
    rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
    whi = (rbi - lti).clamp(min=0)  # [N,M,2]
    areai = whi[:, :, 0] * whi[:, :, 1]  # smallest enclosing box area
    return iou - (areai - union) / areai


def box_ciou(box1, box2, eps: float = 1e-7):
    """
    Return complete intersection-over-union (Jaccard index) between two sets of boxes.
    Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
    ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
    Args:
        box1 (Tensor[N, 4]): first set of boxes
        box2 (Tensor[M, 4]): second set of boxes
        eps (float, optional): small number to prevent division by zero. Default: 1e-7
    Returns:
        Tensor[N, M]: the NxM matrix containing the pairwise complete IoU values
        for every element in box1 and box2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    union = area1[:, None] + area2 - inter
    iou = inter / union

    lti = torch.min(box1[:, None, :2], box2[:, :2])
    rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
    whi = (rbi - lti).clamp(min=0)  # [N,M,2]
    diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps

    # centers of boxes
    x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2
    y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2
    x_g = (box2[:, 0] + box2[:, 2]) / 2
    y_g = (box2[:, 1] + box2[:, 3]) / 2
    # The distance between boxes' centers squared.
    centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2

    w_pred = box1[:, None, 2] - box1[:, None, 0]
    h_pred = box1[:, None, 3] - box1[:, None, 1]
    w_gt = box2[:, 2] - box2[:, 0]
    h_gt = box2[:, 3] - box2[:, 1]
    v = (4 / (torch.pi ** 2)) * torch.pow((torch.atan(w_gt / h_gt) - torch.atan(w_pred / h_pred)), 2)
    with torch.no_grad():
        alpha = v / (1 - iou + v + eps)
    return iou - (centers_distance_squared / diagonal_distance_squared) - alpha * v


def box_diou(box1, box2, eps: float = 1e-7):
    """
    Return distance intersection-over-union (Jaccard index) between two sets of boxes.
    Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with
    ``0 <= x1 < x2`` and ``0 <= y1 < y2``.
    Args:
        box1 (Tensor[N, 4]): first set of boxes
        box2 (Tensor[M, 4]): second set of boxes
        eps (float, optional): small number to prevent division by zero. Default: 1e-7
    Returns:
        Tensor[N, M]: the NxM matrix containing the pairwise distance IoU values
        for every element in box1 and box2
    """

    def box_area(box):
        # box = 4xn
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.T)
    area2 = box_area(box2.T)

    inter = (torch.min(box1[:, None, 2:], box2[:, 2:]) - torch.max(box1[:, None, :2], box2[:, :2])).clamp(0).prod(2)
    union = area1[:, None] + area2 - inter
    iou = inter / union

    lti = torch.min(box1[:, None, :2], box2[:, :2])
    rbi = torch.max(box1[:, None, 2:], box2[:, 2:])
    whi = (rbi - lti).clamp(min=0)  # [N,M,2]
    diagonal_distance_squared = (whi[:, :, 0] ** 2) + (whi[:, :, 1] ** 2) + eps

    # centers of boxes
    x_p = (box1[:, None, 0] + box1[:, None, 2]) / 2
    y_p = (box1[:, None, 1] + box1[:, None, 3]) / 2
    x_g = (box2[:, 0] + box2[:, 2]) / 2
    y_g = (box2[:, 1] + box2[:, 3]) / 2
    # The distance between boxes' centers squared.
    centers_distance_squared = (x_p - x_g) ** 2 + (y_p - y_g) ** 2

    # The distance IoU is the IoU penalized by a normalized
    # distance between boxes' centers squared.
    return iou - (centers_distance_squared / diagonal_distance_squared)


def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=()):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Returns:
        list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """

    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        if nc == 1:
            x[:, 5:] = x[:, 4:5]  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
                                  # so there is no need to multiply
        else:
            x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output
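
# Usage sketch (illustrative; `model` and `img` are assumed to exist, as in a detect script):
#   pred = model(img)[0]  # raw (batch, anchors, 5 + nc) predictions
#   pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)
#   for det in pred:  # one (n, 6) tensor per image
#       for *xyxy, conf, cls in det:
#           ...  # xyxy box, confidence, class index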


def non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                            labels=(), kpt_label=False, nc=None, nkpt=None):
    """Runs Non-Maximum Suppression (NMS) on inference results

    Returns:
        list of detections, one (n,6) tensor per image [xyxy, conf, cls]
    """
    if nc is None:
        nc = prediction.shape[2] - 5 if not kpt_label else prediction.shape[2] - 56  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates

    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS

    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence

        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)

        # If none remain process next image
        if not x.shape[0]:
            continue

        # Compute conf
        x[:, 5:5 + nc] *= x[:, 4:5]  # conf = obj_conf * cls_conf

        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])

        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            if not kpt_label:
                conf, j = x[:, 5:].max(1, keepdim=True)
                x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
            else:
                kpts = x[:, 6:]
                conf, j = x[:, 5:6].max(1, keepdim=True)
                x = torch.cat((box, conf, j.float(), kpts), 1)[conf.view(-1) > conf_thres]

        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]

        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]

        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence

        # Batched NMS
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy

        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded

    return output


def strip_optimizer(f='best.pt', s=''):  # from utils.general import *; strip_optimizer()
    # Strip optimizer from 'f' to finalize training, optionally save as 's'
    x = torch.load(f, map_location=torch.device('cpu'))
    if x.get('ema'):
        x['model'] = x['ema']  # replace model with ema
    for k in 'optimizer', 'training_results', 'wandb_id', 'ema', 'updates':  # keys
        x[k] = None
    x['epoch'] = -1
    x['model'].half()  # to FP16
    for p in x['model'].parameters():
        p.requires_grad = False
    torch.save(x, s or f)
    mb = os.path.getsize(s or f) / 1E6  # filesize
    print(f"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB")


def print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))

    if bucket:
        url = 'gs://%s/evolve.txt' % bucket
        if gsutil_getsize(url) > (os.path.getsize('evolve.txt') if os.path.exists('evolve.txt') else 0):
            os.system('gsutil cp %s .' % url)  # download evolve.txt if larger than local

    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    x = x[np.argsort(-fitness(x))]  # sort
    np.savetxt('evolve.txt', x, '%10.3g')  # save sorted by fitness

    # Save yaml
    for i, k in enumerate(hyp.keys()):
        hyp[k] = float(x[0, i + 7])
    with open(yaml_file, 'w') as f:
        results = tuple(x[0, :7])
        c = '%10.4g' * len(results) % results  # results (P, R, mAP@0.5, mAP@0.5:0.95, val_losses x 3)
        f.write('# Hyperparameter Evolution Results\n# Generations: %g\n# Metrics: ' % len(x) + c + '\n\n')
        yaml.dump(hyp, f, sort_keys=False)

    if bucket:
        os.system('gsutil cp evolve.txt %s gs://%s' % (yaml_file, bucket))  # upload


def apply_classifier(x, model, img, im0):
    # Apply a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()

            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()

            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)

            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)

                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x224x224
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)

            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections

    return x


def increment_path(path, exist_ok=True, sep=''):
    # Increment path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3 etc.
    path = Path(path)  # os-agnostic
    if (path.exists() and exist_ok) or (not path.exists()):
        return str(path)
    else:
        dirs = glob.glob(f"{path}{sep}*")  # similar paths
        matches = [re.search(rf"{path.stem}{sep}(\d+)", d) for d in dirs]
        i = [int(m.groups()[0]) for m in matches if m]  # indices
        n = max(i) + 1 if i else 2  # increment number
        return f"{path}{sep}{n}"  # update path
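
# Behavior sketch (illustrative): with exist_ok=False, an existing path is incremented.
#   >>> increment_path('runs/exp', exist_ok=False)   # 'runs/exp' if free, else 'runs/exp2', 'runs/exp3', ...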