# Dataset utils and dataloaders

import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

import pickle
from copy import deepcopy
# from pycocotools import mask as maskUtils
from torchvision.utils import save_image
from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align

from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
    resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first

# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def get_hash(files):
    # Returns a single hash value of a list of files
    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
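
# Note: get_hash() is a cheap fingerprint (a sum of file sizes), not a cryptographic hash;
# it only detects dataset changes that alter the total size of the listed files.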


def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except:
        pass

    return s
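
# Note: EXIF orientation 6 (rotate 90 CW) and 8 (rotate 270 CW) both mean the stored pixels are
# rotated a quarter turn, so exif_size() swaps width and height; e.g. a JPEG stored as 640x480
# with orientation tag 6 reports (480, 640).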


def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure only the first process in DDP scans the dataset first, so the other processes can use its cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset properties will update during training, else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
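
# Example usage (sketch; assumes an argparse-style `opt` with a `single_cls` flag and a `hyp`
# dict of augmentation hyperparameters, as supplied by train.py):
#   dataloader, dataset = create_dataloader('../coco/train2017.txt', imgsz=640, batch_size=16,
#                                           stride=32, opt=opt, hyp=hyp, augment=True)
#   for imgs, targets, paths, shapes in dataloader:
#       pass  # imgs: uint8 tensor (B,3,H,W); targets: (N,6) rows of (batch_index, cls, x, y, w, h)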


class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        for i in range(len(self)):
            yield next(self.iterator)


class _RepeatSampler(object):
    """ Sampler that repeats forever

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)
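
# Design note: _RepeatSampler never exhausts, so the single iterator created in
# InfiniteDataLoader.__init__ keeps its worker processes alive across epochs instead of
# respawning them at every epoch boundary.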


class LoadImages:  # for inference
    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            # print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
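
# Example usage (sketch; the source path is illustrative):
#   dataset = LoadImages('inference/images', img_size=640, stride=32)
#   for path, img, im0, vid_cap in dataset:
#       im = torch.from_numpy(img).float() / 255.0  # CHW RGB in [0, 1], as detect-style scripts expect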


class LoadWebcam:  # for inference
    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride

        if pipe.isnumeric():
            pipe = int(pipe)  # local camera index (int() instead of eval() on user input)
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0


class LoadStreams:  # multiple IP or RTSP cameras
    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            url = int(s) if s.isnumeric() else s  # local camera index or stream URL (int() instead of eval())
            if 'youtube.com/' in str(url) or 'youtu.be/' in str(url):  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                url = pafy.new(url).getbest(preftype="mp4").url
            cap = cv2.VideoCapture(url)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps = cap.get(cv2.CAP_PROP_FPS) % 100

            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                success, im = cap.retrieve()
                self.imgs[index] = im if success else self.imgs[index] * 0
                n = 0
            time.sleep(1 / self.fps)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, BHWC to BCHW
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years


def img2label_paths(img_paths):
    # Define label paths as a function of image paths
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
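
# Example: on POSIX, img2label_paths(['/data/images/train/0001.jpg']) -> ['/data/labels/train/0001.txt']
# (the first '/images/' path component is swapped for '/labels/' and the extension becomes .txt).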


class LoadImagesAndLabels(Dataset):  # for training/testing
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        # self.albumentations = Albumentations() if augment else None

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            # if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:  # changed
            #     cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
        else:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        cache.pop('version')  # remove version
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index (int, since np.int is deprecated)
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            if cache_images == 'disk':
                self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
                self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
                self.im_cache_dir.mkdir(parents=True, exist_ok=True)
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                if cache_images == 'disk':
                    if not self.img_npy[i].exists():
                        np.save(self.img_npy[i].as_posix(), x[0])
                    gb += self.img_npy[i].stat().st_size
                else:
                    self.imgs[i], self.img_hw0[i], self.img_hw[i] = x
                    gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
            pbar.close()

    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupted
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for i, (im_file, lb_file) in enumerate(pbar):
            try:
                # verify images
                im = Image.open(im_file)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                segments = []  # instance segments
                assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
                assert im.format.lower() in img_formats, f'invalid image format {im.format}'

                # verify labels
                if os.path.isfile(lb_file):
                    nf += 1  # label found
                    with open(lb_file, 'r') as f:
                        l = [x.split() for x in f.read().strip().splitlines()]
                        if any([len(x) > 8 for x in l]):  # is segment
                            classes = np.array([x[0] for x in l], dtype=np.float32)
                            segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                            l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                        l = np.array(l, dtype=np.float32)
                    if len(l):
                        assert l.shape[1] == 5, 'labels require 5 columns each'
                        assert (l >= 0).all(), 'negative labels'
                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
                    else:
                        ne += 1  # label empty
                        l = np.zeros((0, 5), dtype=np.float32)
                else:
                    nm += 1  # label missing
                    l = np.zeros((0, 5), dtype=np.float32)
                x[im_file] = [l, shape, segments]
            except Exception as e:
                nc += 1
                print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')

            pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        pbar.close()

        if nf == 0:
            print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')

        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, i + 1
        x['version'] = 0.1  # cache version
        torch.save(x, path)  # save for next time
        logging.info(f'{prefix}New cache created: {path}')
        return x

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     # self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            if random.random() < 0.8:
                img, labels = load_mosaic(self, index)
            else:
                img, labels = load_mosaic9(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                if random.random() < 0.8:
                    img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
                else:
                    img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            # img, labels = self.albumentations(img, labels)

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

            if random.random() < hyp['paste_in']:
                sample_labels, sample_images, sample_masks = [], [], []
                while len(sample_labels) < 30:
                    sample_labels_, sample_images_, sample_masks_ = load_samples(
                        self, random.randint(0, len(self.labels) - 1))
                    sample_labels += sample_labels_
                    sample_images += sample_images_
                    sample_masks += sample_masks_
                    if len(sample_labels) == 0:
                        break
                labels = pastein(img, labels, sample_labels, sample_images, sample_masks)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
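
# Note: collated targets have shape (N, 6) with rows (image_index_in_batch, cls, x, y, w, h) in
# normalized xywh; collate_fn4 merges each group of 4 samples into one quad sample, either by
# 2x-upsampling a single image or by tiling all four into a 2x2 canvas and offsetting/rescaling labels.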


# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized


def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed


def hist_equalize(img, clahe=True, bgr=False):
    # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB


def load_mosaic(self, index):
    # loads images in a 4-mosaic

    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    # img4, labels4, segments4 = remove_background(img4, labels4, segments4)
    # sample_segments(img4, labels4, segments4, probability=self.hyp['copy_paste'])
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
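
# Note: the mosaic is assembled on a 2s x 2s canvas around a random center (xc, yc); passing
# border=self.mosaic_border to random_perspective() crops the result back to s x s, so the
# returned image matches self.img_size.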


def load_mosaic9(self, index):
    # loads images in a 9-mosaic

    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    # img9, labels9, segments9 = remove_background(img9, labels9, segments9)
    img9, labels9, segments9 = copy_paste(img9, labels9, segments9, probability=self.hyp['copy_paste'])
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9


def load_samples(self, index):
    # builds a 4-mosaic (as in load_mosaic) and samples object crops and masks from it
    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()

    # Sample object crops and their masks from the mosaic
    # img4, labels4, segments4 = remove_background(img4, labels4, segments4)
    sample_labels, sample_images, sample_masks = sample_segments(img4, labels4, segments4, probability=0.5)

    return sample_labels, sample_images, sample_masks


def copy_paste(img, labels, segments, probability=0.5):
    # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
    n = len(segments)
    if probability and n:
        h, w, c = img.shape  # height, width, channels
        im_new = np.zeros(img.shape, np.uint8)
        for j in random.sample(range(n), k=round(probability * n)):
            l, s = labels[j], segments[j]
            box = w - l[3], l[2], w - l[1], l[4]
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels
                labels = np.concatenate((labels, [[l[0], *box]]), 0)
                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)

        result = cv2.bitwise_and(src1=img, src2=im_new)
        result = cv2.flip(result, 1)  # augment segments (flip left-right)
        i = result > 0  # pixels to replace
        # i[:, :] = result.max(2).reshape(h, w, 1)  # act over ch
        img[i] = result[i]  # cv2.imwrite('debug.jpg', img)  # debug

    return img, labels, segments


def remove_background(img, labels, segments):
    # Blank out everything outside the labeled segments (fills the background with gray 114)
    n = len(segments)
    h, w, c = img.shape  # height, width, channels
    im_new = np.zeros(img.shape, np.uint8)
    img_new = np.ones(img.shape, np.uint8) * 114
    for j in range(n):
        cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)

    result = cv2.bitwise_and(src1=img, src2=im_new)

    i = result > 0  # pixels to replace
    img_new[i] = result[i]

    return img_new, labels, segments


def sample_segments(img, labels, segments, probability=0.5):
    # Sample object crops and binary masks from segments, labels as nx5 np.array(cls, xyxy)
    n = len(segments)
    sample_labels = []
    sample_images = []
    sample_masks = []
    if probability and n:
        h, w, c = img.shape  # height, width, channels
        for j in random.sample(range(n), k=round(probability * n)):
            l, s = labels[j], segments[j]
            box = l[1].astype(int).clip(0, w - 1), l[2].astype(int).clip(0, h - 1), \
                  l[3].astype(int).clip(0, w - 1), l[4].astype(int).clip(0, h - 1)
            if (box[2] <= box[0]) or (box[3] <= box[1]):
                continue

            sample_labels.append(l[0])

            mask = np.zeros(img.shape, np.uint8)
            cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
            sample_masks.append(mask[box[1]:box[3], box[0]:box[2], :])

            result = cv2.bitwise_and(src1=img, src2=mask)
            i = result > 0  # pixels to replace
            mask[i] = result[i]
            sample_images.append(mask[box[1]:box[3], box[0]:box[2], :])

    return sample_labels, sample_images, sample_masks


def replicate(img, labels):
    # Replicate labels
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels


def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
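
# Example: with the default auto=True (minimum-rectangle padding), a 1280x720 BGR frame letterboxed
# to new_shape=640 is resized to 640x360 and padded to 640x384, the nearest stride-32 multiple:
#   img, ratio, (dw, dh) = letterbox(im0, 640)  # ratio = (0.5, 0.5), (dw, dh) = (0.0, 12.0)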


def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1.1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return img, targets


def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates


def bbox_ioa(box1, box2):
    # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
    box2 = box2.transpose()

    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

    # Intersection area
    inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                 (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

    # box2 area
    box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16

    # Intersection over box2 area
    return inter_area / box2_area
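
# Worked example: bbox_ioa(np.array([0, 0, 10, 10]), np.array([[5, 5, 15, 15]])) -> [0.25],
# since the 5x5 overlap (area 25) covers a quarter of box2's 10x10 area (100).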


def cutout(image, labels):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    h, w = image.shape[:2]

    # create random masks
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # return unobscured labels
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels


def pastein(image, labels, sample_labels, sample_images, sample_masks):
    # Pastes sampled object crops into random regions of the image (copy-paste style augmentation)
    h, w = image.shape[:2]

    # create random masks
    scales = [0.75] * 2 + [0.5] * 4 + [0.25] * 4 + [0.125] * 4 + [0.0625] * 6  # image size fraction
    for s in scales:
        if random.random() < 0.2:
            continue
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
        if len(labels):
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
        else:
            ioa = np.zeros(1)

        # allow at most 30% obscuration of existing labels, and require a reasonably sized region
        if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin + 20) and (ymax > ymin + 20):
            sel_ind = random.randint(0, len(sample_labels) - 1)
            hs, ws, cs = sample_images[sel_ind].shape
            r_scale = min((ymax - ymin) / hs, (xmax - xmin) / ws)
            r_w = int(ws * r_scale)
            r_h = int(hs * r_scale)

            if (r_w > 10) and (r_h > 10):
                r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h))
                r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h))
                temp_crop = image[ymin:ymin + r_h, xmin:xmin + r_w]
                m_ind = r_mask > 0
                if m_ind.astype(int).sum() > 60:  # require enough mask pixels (int, since np.int is deprecated)
                    temp_crop[m_ind] = r_image[m_ind]
                    box = np.array([xmin, ymin, xmin + r_w, ymin + r_h], dtype=np.float32)
                    if len(labels):
                        labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0)
                    else:
                        labels = np.array([[sample_labels[sel_ind], *box]])

                    image[ymin:ymin + r_h, xmin:xmin + r_w] = temp_crop

    return labels


class Albumentations:
    # YOLOv5 Albumentations class (optional, only used if package is installed)
    def __init__(self):
        self.transform = None
        try:
            import albumentations as A

            self.transform = A.Compose([
                A.CLAHE(p=0.01),
                A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.01),
                A.RandomGamma(gamma_limit=[80, 120], p=0.01),
                A.Blur(p=0.01),
                A.MedianBlur(p=0.01),
                A.ToGray(p=0.01),
                A.ImageCompression(quality_lower=75, p=0.01)],
                bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels']))

            # logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed; keep transform as None so the class stays optional
            pass

    def __call__(self, im, labels, p=1.0):
        if self.transform and random.random() < p:
            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed
            im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
        return im, labels


def create_folder(path='./new'):
    # Create folder
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder


def flatten_recursive(path='../coco'):
    # Flatten a recursive directory by bringing all files to top level
    new_path = Path(path + '_flat')
    create_folder(new_path)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)


def extract_boxes(path='../coco/'):  # from utils.datasets import *; extract_boxes('../coco128')
    # Convert detection dataset into classification dataset, with one directory per class

    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)  # int, since np.int is deprecated

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'


def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit('../coco')
    Arguments
        path:           Path to images directory
        weights:        Train, val, test weights (list)
        annotated_only: Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], [])  # image files only
    n = len(files)  # number of files
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove existing

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for i, img in tqdm(zip(indices, files), total=n):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path / txt[i], 'a') as f:
                f.write(str(img) + '\n')  # add image to txt file


def load_segmentations(self, index):
    # Look up pre-computed segmentations by image path (note: the key prefix is hardcoded)
    key = '/work/handsomejw66/coco17/' + self.img_files[index]
    return self.segs[key]