# Dataset utils and dataloaders
import glob
import logging
import math
import os
import random
import shutil
import time
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from threading import Thread

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm

import pickle
from copy import deepcopy
#from pycocotools import mask as maskUtils
from torchvision.utils import save_image
from torchvision.ops import roi_pool, roi_align, ps_roi_pool, ps_roi_align

from utils.general import check_requirements, xyxy2xywh, xywh2xyxy, xywhn2xyxy, xyn2xy, segment2box, segments2boxes, \
    resample_segments, clean_str
from utils.torch_utils import torch_distributed_zero_first
# Parameters
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv']  # acceptable video suffixes
logger = logging.getLogger(__name__)

# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break


def get_hash(files):
    # Returns a single hash value of a list of files
    return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
    # Returns exif-corrected PIL size
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation == 6:  # rotation 270
            s = (s[1], s[0])
        elif rotation == 8:  # rotation 90
            s = (s[1], s[0])
    except:
        pass

    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
                      rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):
    # Make sure only the first process in DDP processes the dataset first, so the others can use the cache
    with torch_distributed_zero_first(rank):
        dataset = LoadImagesAndLabels(path, imgsz, batch_size,
                                      augment=augment,  # augment images
                                      hyp=hyp,  # augmentation hyperparameters
                                      rect=rect,  # rectangular training
                                      cache_images=cache,
                                      single_cls=opt.single_cls,
                                      stride=int(stride),
                                      pad=pad,
                                      image_weights=image_weights,
                                      prefix=prefix)

    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers])  # number of workers
    sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None
    loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader
    # Use torch.utils.data.DataLoader() if dataset properties will update during training, else InfiniteDataLoader()
    dataloader = loader(dataset,
                        batch_size=batch_size,
                        num_workers=nw,
                        sampler=sampler,
                        pin_memory=True,
                        collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)
    return dataloader, dataset
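
# Example usage (a minimal sketch; 'data/train/images' and the Namespace below are
# hypothetical placeholders, not values defined in this file):
#   from argparse import Namespace
#   opt = Namespace(single_cls=False)
#   dataloader, dataset = create_dataloader('data/train/images', imgsz=640, batch_size=16,
#                                           stride=32, opt=opt, augment=False, rank=-1)
#   for imgs, targets, paths, shapes in dataloader:
#       # imgs: (bs, 3, H, W) tensor; targets: (n, 6) rows of [image_index, cls, x, y, w, h]
#       break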
class InfiniteDataLoader(torch.utils.data.dataloader.DataLoader):
    """ Dataloader that reuses workers

    Uses same syntax as vanilla DataLoader
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()

    def __len__(self):
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        for i in range(len(self)):
            yield next(self.iterator)


class _RepeatSampler(object):
    """ Sampler that repeats forever

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        while True:
            yield from iter(self.sampler)
class LoadImages:  # for inference
    def __init__(self, path, img_size=640, stride=32):
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()

            self.frame += 1
            print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: ', end='')

        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            #print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
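
# Example usage (a minimal sketch; 'inference/images' is a placeholder directory):
#   dataset = LoadImages('inference/images', img_size=640, stride=32)
#   for path, img, img0, vid_cap in dataset:
#       # img is the letterboxed CHW RGB array ready for the model,
#       # img0 the original BGR frame for drawing/saving
#       pass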
class LoadWebcam:  # for inference
    def __init__(self, pipe='0', img_size=640, stride=32):
        self.img_size = img_size
        self.stride = stride

        if pipe.isnumeric():
            pipe = eval(pipe)  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, f'Camera Error {self.pipe}'
        img_path = 'webcam.jpg'
        print(f'webcam {self.count}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            url = eval(s) if s.isnumeric() else s
            if 'youtube.com/' in str(url) or 'youtu.be/' in str(url):  # if source is YouTube video
                check_requirements(('pafy', 'youtube_dl'))
                import pafy
                url = pafy.new(url).getbest(preftype="mp4").url
            cap = cv2.VideoCapture(url)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                success, im = cap.retrieve()
                self.imgs[index] = im if success else self.imgs[index] * 0
                n = 0
            time.sleep(1 / self.fps)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
def img2label_paths(img_paths):
    # Define label paths as a function of image paths
    sa, sb = os.sep + 'images' + os.sep, os.sep + 'labels' + os.sep  # /images/, /labels/ substrings
    return ['txt'.join(x.replace(sa, sb, 1).rsplit(x.split('.')[-1], 1)) for x in img_paths]
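
# For example (a sketch of the expected /images/ <-> /labels/ layout, not paths that exist here):
#   img2label_paths(['data/images/train/0001.jpg'])  ->  ['data/labels/train/0001.txt']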
class LoadImagesAndLabels(Dataset):  # for training/testing
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0, prefix=''):
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride
        self.path = path
        #self.albumentations = Albumentations() if augment else None

        try:
            f = []  # image files
            for p in path if isinstance(path, list) else [path]:
                p = Path(p)  # os-agnostic
                if p.is_dir():  # dir
                    f += glob.glob(str(p / '**' / '*.*'), recursive=True)
                    # f = list(p.rglob('**/*.*'))  # pathlib
                elif p.is_file():  # file
                    with open(p, 'r') as t:
                        t = t.read().strip().splitlines()
                        parent = str(p.parent) + os.sep
                        f += [x.replace('./', parent) if x.startswith('./') else x for x in t]  # local to global path
                        # f += [p.parent / x.lstrip(os.sep) for x in t]  # local to global path (pathlib)
                else:
                    raise Exception(f'{prefix}{p} does not exist')
            self.img_files = sorted([x.replace('/', os.sep) for x in f if x.split('.')[-1].lower() in img_formats])
            # self.img_files = sorted([x for x in f if x.suffix[1:].lower() in img_formats])  # pathlib
            assert self.img_files, f'{prefix}No images found'
        except Exception as e:
            raise Exception(f'{prefix}Error loading data from {path}: {e}\nSee {help_url}')

        # Check cache
        self.label_files = img2label_paths(self.img_files)  # labels
        cache_path = (p if p.is_file() else Path(self.label_files[0]).parent).with_suffix('.cache')  # cached labels
        if cache_path.is_file():
            cache, exists = torch.load(cache_path), True  # load
            #if cache['hash'] != get_hash(self.label_files + self.img_files) or 'version' not in cache:  # changed
            #    cache, exists = self.cache_labels(cache_path, prefix), False  # re-cache
        else:
            cache, exists = self.cache_labels(cache_path, prefix), False  # cache

        # Display cache
        nf, nm, ne, nc, n = cache.pop('results')  # found, missing, empty, corrupted, total
        if exists:
            d = f"Scanning '{cache_path}' images and labels... {nf} found, {nm} missing, {ne} empty, {nc} corrupted"
            tqdm(None, desc=prefix + d, total=n, initial=n)  # display cache results
        assert nf > 0 or not augment, f'{prefix}No labels in {cache_path}. Can not train without labels. See {help_url}'

        # Read cache
        cache.pop('hash')  # remove hash
        cache.pop('version')  # remove version
        labels, shapes, self.segments = zip(*cache.values())
        self.labels = list(labels)
        self.shapes = np.array(shapes, dtype=np.float64)
        self.img_files = list(cache.keys())  # update
        self.label_files = img2label_paths(cache.keys())  # update
        if single_cls:
            for x in self.labels:
                x[:, 0] = 0

        n = len(shapes)  # number of images
        bi = np.floor(np.arange(n) / batch_size).astype(int)  # batch index
        nb = bi[-1] + 1  # number of batches
        self.batch = bi  # batch index of image
        self.n = n
        self.indices = range(n)

        # Rectangular Training
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.labels = [self.labels[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(int) * stride

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        self.imgs = [None] * n
        if cache_images:
            if cache_images == 'disk':
                self.im_cache_dir = Path(Path(self.img_files[0]).parent.as_posix() + '_npy')
                self.img_npy = [self.im_cache_dir / Path(f).with_suffix('.npy').name for f in self.img_files]
                self.im_cache_dir.mkdir(parents=True, exist_ok=True)
            gb = 0  # Gigabytes of cached images
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            results = ThreadPool(8).imap(lambda x: load_image(*x), zip(repeat(self), range(n)))
            pbar = tqdm(enumerate(results), total=n)
            for i, x in pbar:
                if cache_images == 'disk':
                    if not self.img_npy[i].exists():
                        np.save(self.img_npy[i].as_posix(), x[0])
                    gb += self.img_npy[i].stat().st_size
                else:
                    self.imgs[i], self.img_hw0[i], self.img_hw[i] = x
                    gb += self.imgs[i].nbytes
                pbar.desc = f'{prefix}Caching images ({gb / 1E9:.1f}GB)'
            pbar.close()
    def cache_labels(self, path=Path('./labels.cache'), prefix=''):
        # Cache dataset labels, check images and read shapes
        x = {}  # dict
        nm, nf, ne, nc = 0, 0, 0, 0  # number missing, found, empty, corrupted
        pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
        for i, (im_file, lb_file) in enumerate(pbar):
            try:
                # verify images
                im = Image.open(im_file)
                im.verify()  # PIL verify
                shape = exif_size(im)  # image size
                segments = []  # instance segments
                assert (shape[0] > 9) & (shape[1] > 9), f'image size {shape} <10 pixels'
                assert im.format.lower() in img_formats, f'invalid image format {im.format}'

                # verify labels
                if os.path.isfile(lb_file):
                    nf += 1  # label found
                    with open(lb_file, 'r') as f:
                        l = [x.split() for x in f.read().strip().splitlines()]
                        if any([len(x) > 8 for x in l]):  # is segment
                            classes = np.array([x[0] for x in l], dtype=np.float32)
                            segments = [np.array(x[1:], dtype=np.float32).reshape(-1, 2) for x in l]  # (cls, xy1...)
                            l = np.concatenate((classes.reshape(-1, 1), segments2boxes(segments)), 1)  # (cls, xywh)
                        l = np.array(l, dtype=np.float32)
                    if len(l):
                        assert l.shape[1] == 5, 'labels require 5 columns each'
                        assert (l >= 0).all(), 'negative labels'
                        assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels'
                        assert np.unique(l, axis=0).shape[0] == l.shape[0], 'duplicate labels'
                    else:
                        ne += 1  # label empty
                        l = np.zeros((0, 5), dtype=np.float32)
                else:
                    nm += 1  # label missing
                    l = np.zeros((0, 5), dtype=np.float32)
                x[im_file] = [l, shape, segments]
            except Exception as e:
                nc += 1
                print(f'{prefix}WARNING: Ignoring corrupted image and/or label {im_file}: {e}')

            pbar.desc = f"{prefix}Scanning '{path.parent / path.stem}' images and labels... " \
                        f"{nf} found, {nm} missing, {ne} empty, {nc} corrupted"
        pbar.close()

        if nf == 0:
            print(f'{prefix}WARNING: No labels found in {path}. See {help_url}')

        x['hash'] = get_hash(self.label_files + self.img_files)
        x['results'] = nf, nm, ne, nc, i + 1
        x['version'] = 0.1  # cache version
        torch.save(x, path)  # save for next time
        logging.info(f'{prefix}New cache created: {path}')
        return x
    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self
    def __getitem__(self, index):
        index = self.indices[index]  # linear, shuffled, or image_weights

        hyp = self.hyp
        mosaic = self.mosaic and random.random() < hyp['mosaic']
        if mosaic:
            # Load mosaic
            if random.random() < 0.8:
                img, labels = load_mosaic(self, index)
            else:
                img, labels = load_mosaic9(self, index)
            shapes = None

            # MixUp https://arxiv.org/pdf/1710.09412.pdf
            if random.random() < hyp['mixup']:
                if random.random() < 0.8:
                    img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
                else:
                    img2, labels2 = load_mosaic9(self, random.randint(0, len(self.labels) - 1))
                r = np.random.beta(8.0, 8.0)  # mixup ratio, alpha=beta=8.0
                img = (img * r + img2 * (1 - r)).astype(np.uint8)
                labels = np.concatenate((labels, labels2), 0)

        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            labels = self.labels[index].copy()
            if labels.size:  # normalized xywh to pixel xyxy format
                labels[:, 1:] = xywhn2xyxy(labels[:, 1:], ratio[0] * w, ratio[1] * h, padw=pad[0], padh=pad[1])

        if self.augment:
            # Augment imagespace
            if not mosaic:
                img, labels = random_perspective(img, labels,
                                                 degrees=hyp['degrees'],
                                                 translate=hyp['translate'],
                                                 scale=hyp['scale'],
                                                 shear=hyp['shear'],
                                                 perspective=hyp['perspective'])

            #img, labels = self.albumentations(img, labels)

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

            if random.random() < hyp['paste_in']:
                sample_labels, sample_images, sample_masks = [], [], []
                while len(sample_labels) < 30:
                    sample_labels_, sample_images_, sample_masks_ = load_samples(self, random.randint(0, len(self.labels) - 1))
                    sample_labels += sample_labels_
                    sample_images += sample_images_
                    sample_masks += sample_masks_
                    #print(len(sample_labels))
                    if len(sample_labels) == 0:
                        break
                labels = pastein(img, labels, sample_labels, sample_images, sample_masks)

        nL = len(labels)  # number of labels
        if nL:
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])  # convert xyxy to xywh
            labels[:, [2, 4]] /= img.shape[0]  # normalized height 0-1
            labels[:, [1, 3]] /= img.shape[1]  # normalized width 0-1

        if self.augment:
            # flip up-down
            if random.random() < hyp['flipud']:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

            # flip left-right
            if random.random() < hyp['fliplr']:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes
    @staticmethod
    def collate_fn(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes

    @staticmethod
    def collate_fn4(batch):
        img, label, path, shapes = zip(*batch)  # transposed
        n = len(shapes) // 4
        img4, label4, path4, shapes4 = [], [], path[:n], shapes[:n]

        ho = torch.tensor([[0., 0, 0, 1, 0, 0]])
        wo = torch.tensor([[0., 0, 1, 0, 0, 0]])
        s = torch.tensor([[1, 1, .5, .5, .5, .5]])  # scale
        for i in range(n):  # zidane torch.zeros(16,3,720,1280)  # BCHW
            i *= 4
            if random.random() < 0.5:
                im = F.interpolate(img[i].unsqueeze(0).float(), scale_factor=2., mode='bilinear', align_corners=False)[
                    0].type(img[i].type())
                l = label[i]
            else:
                im = torch.cat((torch.cat((img[i], img[i + 1]), 1), torch.cat((img[i + 2], img[i + 3]), 1)), 2)
                l = torch.cat((label[i], label[i + 1] + ho, label[i + 2] + wo, label[i + 3] + ho + wo), 0) * s
            img4.append(im)
            label4.append(l)

        for i, l in enumerate(label4):
            l[:, 0] = i  # add target image index for build_targets()

        return torch.stack(img4, 0), torch.cat(label4, 0), path4, shapes4
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed
def hist_equalize(img, clahe=True, bgr=False):
    # Equalize histogram on BGR image 'img' with img.shape(n,m,3) and range 0-255
    yuv = cv2.cvtColor(img, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
    if clahe:
        c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        yuv[:, :, 0] = c.apply(yuv[:, :, 0])
    else:
        yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])  # equalize Y channel histogram
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)  # convert YUV image to RGB
def load_mosaic(self, index):
    # loads images in a 4-mosaic

    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    #img4, labels4, segments4 = remove_background(img4, labels4, segments4)
    #sample_segments(img4, labels4, segments4, probability=self.hyp['copy_paste'])
    img4, labels4, segments4 = copy_paste(img4, labels4, segments4, probability=self.hyp['copy_paste'])
    img4, labels4 = random_perspective(img4, labels4, segments4,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img4, labels4
def load_mosaic9(self, index):
    # loads images in a 9-mosaic

    labels9, segments9 = [], []
    s = self.img_size
    indices = [index] + random.choices(self.indices, k=8)  # 8 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img9
        if i == 0:  # center
            img9 = np.full((s * 3, s * 3, img.shape[2]), 114, dtype=np.uint8)  # base image with 9 tiles
            h0, w0 = h, w
            c = s, s, s + w, s + h  # xmin, ymin, xmax, ymax (base) coordinates
        elif i == 1:  # top
            c = s, s - h, s + w, s
        elif i == 2:  # top right
            c = s + wp, s - h, s + wp + w, s
        elif i == 3:  # right
            c = s + w0, s, s + w0 + w, s + h
        elif i == 4:  # bottom right
            c = s + w0, s + hp, s + w0 + w, s + hp + h
        elif i == 5:  # bottom
            c = s + w0 - w, s + h0, s + w0, s + h0 + h
        elif i == 6:  # bottom left
            c = s + w0 - wp - w, s + h0, s + w0 - wp, s + h0 + h
        elif i == 7:  # left
            c = s - w, s + h0 - h, s, s + h0
        elif i == 8:  # top left
            c = s - w, s + h0 - hp - h, s, s + h0 - hp

        padx, pady = c[:2]
        x1, y1, x2, y2 = [max(x, 0) for x in c]  # allocate coords

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padx, pady)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padx, pady) for x in segments]
        labels9.append(labels)
        segments9.extend(segments)

        # Image
        img9[y1:y2, x1:x2] = img[y1 - pady:, x1 - padx:]  # img9[ymin:ymax, xmin:xmax]
        hp, wp = h, w  # height, width previous

    # Offset
    yc, xc = [int(random.uniform(0, s)) for _ in self.mosaic_border]  # mosaic center x, y
    img9 = img9[yc:yc + 2 * s, xc:xc + 2 * s]

    # Concat/clip labels
    labels9 = np.concatenate(labels9, 0)
    labels9[:, [1, 3]] -= xc
    labels9[:, [2, 4]] -= yc
    c = np.array([xc, yc])  # centers
    segments9 = [x - c for x in segments9]

    for x in (labels9[:, 1:], *segments9):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img9, labels9 = replicate(img9, labels9)  # replicate

    # Augment
    #img9, labels9, segments9 = remove_background(img9, labels9, segments9)
    img9, labels9, segments9 = copy_paste(img9, labels9, segments9, probability=self.hyp['copy_paste'])
    img9, labels9 = random_perspective(img9, labels9, segments9,
                                       degrees=self.hyp['degrees'],
                                       translate=self.hyp['translate'],
                                       scale=self.hyp['scale'],
                                       shear=self.hyp['shear'],
                                       perspective=self.hyp['perspective'],
                                       border=self.mosaic_border)  # border to remove

    return img9, labels9
def load_samples(self, index):
    # builds a 4-mosaic, then samples object segments from it (for paste-in augmentation)

    labels4, segments4 = [], []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + random.choices(self.indices, k=3)  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        labels, segments = self.labels[index].copy(), self.segments[index].copy()
        if labels.size:
            labels[:, 1:] = xywhn2xyxy(labels[:, 1:], w, h, padw, padh)  # normalized xywh to pixel xyxy format
            segments = [xyn2xy(x, w, h, padw, padh) for x in segments]
        labels4.append(labels)
        segments4.extend(segments)

    # Concat/clip labels
    labels4 = np.concatenate(labels4, 0)
    for x in (labels4[:, 1:], *segments4):
        np.clip(x, 0, 2 * s, out=x)  # clip when using random_perspective()
    # img4, labels4 = replicate(img4, labels4)  # replicate

    # Augment
    #img4, labels4, segments4 = remove_background(img4, labels4, segments4)
    sample_labels, sample_images, sample_masks = sample_segments(img4, labels4, segments4, probability=0.5)

    return sample_labels, sample_images, sample_masks
def copy_paste(img, labels, segments, probability=0.5):
    # Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
    n = len(segments)
    if probability and n:
        h, w, c = img.shape  # height, width, channels
        im_new = np.zeros(img.shape, np.uint8)
        for j in random.sample(range(n), k=round(probability * n)):
            l, s = labels[j], segments[j]
            box = w - l[3], l[2], w - l[1], l[4]
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            if (ioa < 0.30).all():  # allow 30% obscuration of existing labels
                labels = np.concatenate((labels, [[l[0], *box]]), 0)
                segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
                cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)

        result = cv2.bitwise_and(src1=img, src2=im_new)
        result = cv2.flip(result, 1)  # augment segments (flip left-right)
        i = result > 0  # pixels to replace
        # i[:, :] = result.max(2).reshape(h, w, 1)  # act over ch
        img[i] = result[i]  # cv2.imwrite('debug.jpg', img)  # debug

    return img, labels, segments
def remove_background(img, labels, segments):
    # Removes the background outside instance segments (fills it with gray 114), labels as nx5 np.array(cls, xyxy)
    n = len(segments)
    h, w, c = img.shape  # height, width, channels
    im_new = np.zeros(img.shape, np.uint8)
    img_new = np.ones(img.shape, np.uint8) * 114
    for j in range(n):
        cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)

    result = cv2.bitwise_and(src1=img, src2=im_new)

    i = result > 0  # pixels to replace
    img_new[i] = result[i]  # cv2.imwrite('debug.jpg', img)  # debug

    return img_new, labels, segments
def sample_segments(img, labels, segments, probability=0.5):
    # Sample object crops and their masks from instance segments (used by paste-in augmentation),
    # labels as nx5 np.array(cls, xyxy)
    n = len(segments)
    sample_labels = []
    sample_images = []
    sample_masks = []
    if probability and n:
        h, w, c = img.shape  # height, width, channels
        for j in random.sample(range(n), k=round(probability * n)):
            l, s = labels[j], segments[j]
            box = l[1].astype(int).clip(0, w - 1), l[2].astype(int).clip(0, h - 1), l[3].astype(int).clip(0, w - 1), l[4].astype(int).clip(0, h - 1)
            #print(box)
            if (box[2] <= box[0]) or (box[3] <= box[1]):
                continue

            sample_labels.append(l[0])

            mask = np.zeros(img.shape, np.uint8)
            cv2.drawContours(mask, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
            sample_masks.append(mask[box[1]:box[3], box[0]:box[2], :])

            result = cv2.bitwise_and(src1=img, src2=mask)
            i = result > 0  # pixels to replace
            mask[i] = result[i]  # cv2.imwrite('debug.jpg', img)  # debug
            #print(box)
            sample_images.append(mask[box[1]:box[3], box[0]:box[2], :])

    return sample_labels, sample_images, sample_masks
def replicate(img, labels):
    # Replicate labels
    h, w = img.shape[:2]
    boxes = labels[:, 1:].astype(int)
    x1, y1, x2, y2 = boxes.T
    s = ((x2 - x1) + (y2 - y1)) / 2  # side length (pixels)
    for i in s.argsort()[:round(s.size * 0.5)]:  # smallest indices
        x1b, y1b, x2b, y2b = boxes[i]
        bh, bw = y2b - y1b, x2b - x1b
        yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))  # offset x, y
        x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
        img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)

    return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    # Resize and pad image while meeting stride-multiple constraints
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
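
# Worked example (illustrative numbers only): a 1080x1920 (HxW) frame letterboxed to 640 with
# auto=True scales by r = 640/1920 = 1/3 to 360x640, then pads the height up to the nearest
# stride-32 multiple (384), i.e. 12 px on top and bottom:
#   img, ratio, (dw, dh) = letterbox(np.zeros((1080, 1920, 3), dtype=np.uint8), 640)
#   img.shape  ->  (384, 640, 3);  ratio  ->  (1/3, 1/3);  (dw, dh)  ->  (0.0, 12.0)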
def random_perspective(img, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
                       border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # targets = [cls, xyxy]

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Center
    C = np.eye(3)
    C[0, 2] = -img.shape[1] / 2  # x translation (pixels)
    C[1, 2] = -img.shape[0] / 2  # y translation (pixels)

    # Perspective
    P = np.eye(3)
    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1.1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation (pixels)
    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation (pixels)

    # Combined rotation matrix
    M = T @ S @ R @ P @ C  # order of operations (right to left) is IMPORTANT
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        if perspective:
            img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
        else:  # affine
            img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))

    # Visualize
    # import matplotlib.pyplot as plt
    # ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
    # ax[0].imshow(img[:, :, ::-1])  # base
    # ax[1].imshow(img2[:, :, ::-1])  # warped

    # Transform label coordinates
    n = len(targets)
    if n:
        use_segments = any(x.any() for x in segments)
        new = np.zeros((n, 4))
        if use_segments:  # warp segments
            segments = resample_segments(segments)  # upsample
            for i, segment in enumerate(segments):
                xy = np.ones((len(segment), 3))
                xy[:, :2] = segment
                xy = xy @ M.T  # transform
                xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]  # perspective rescale or affine

                # clip
                new[i] = segment2box(xy, width, height)

        else:  # warp boxes
            xy = np.ones((n * 4, 3))
            xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
            xy = xy @ M.T  # transform
            xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)  # perspective rescale or affine

            # create new boxes
            x = xy[:, [0, 2, 4, 6]]
            y = xy[:, [1, 3, 5, 7]]
            new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

            # clip
            new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
            new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)

        # filter candidates
        i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
        targets = targets[i]
        targets[:, 1:5] = new[i]

    return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):  # box1(4,n), box2(4,n)
    # Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
    w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
    w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
    ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))  # aspect ratio
    return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)  # candidates
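
# For instance (illustrative numbers only): a 100x50 box whose warped version shrinks to 40x20
# keeps 800/5000 = 0.16 of its area and has aspect ratio 2, so it passes the default thresholds
# (w2, h2 > 2 px, area ratio > 0.1, aspect ratio < 20) and is kept as a training candidate.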
def bbox_ioa(box1, box2):
    # Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
    box2 = box2.transpose()

    # Get the coordinates of bounding boxes
    b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
    b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]

    # Intersection area
    inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
                 (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)

    # box2 area
    box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16

    # Intersection over box2 area
    return inter_area / box2_area
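
# Worked example (illustrative numbers only):
#   bbox_ioa(np.array([0, 0, 10, 10]), np.array([[5, 5, 15, 15]]))  ->  array([0.25])
# i.e. a quarter of the second box is covered by the first; cutout() and pastein() below use
# this ratio to decide whether an existing label is too obscured to keep.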
def cutout(image, labels):
    # Applies image cutout augmentation https://arxiv.org/abs/1708.04552
    h, w = image.shape[:2]

    # create random masks
    scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16  # image size fraction
    for s in scales:
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        # apply random color mask
        image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]

        # return unobscured labels
        if len(labels) and s > 0.03:
            box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
            labels = labels[ioa < 0.60]  # remove >60% obscured labels

    return labels
def pastein(image, labels, sample_labels, sample_images, sample_masks):
    # Pastes sampled object crops into the image (paste-in augmentation), updating labels accordingly
    h, w = image.shape[:2]

    # create random masks
    scales = [0.75] * 2 + [0.5] * 4 + [0.25] * 4 + [0.125] * 4 + [0.0625] * 6  # image size fraction
    for s in scales:
        if random.random() < 0.2:
            continue
        mask_h = random.randint(1, int(h * s))
        mask_w = random.randint(1, int(w * s))

        # box
        xmin = max(0, random.randint(0, w) - mask_w // 2)
        ymin = max(0, random.randint(0, h) - mask_h // 2)
        xmax = min(w, xmin + mask_w)
        ymax = min(h, ymin + mask_h)

        box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
        if len(labels):
            ioa = bbox_ioa(box, labels[:, 1:5])  # intersection over area
        else:
            ioa = np.zeros(1)

        if (ioa < 0.30).all() and len(sample_labels) and (xmax > xmin + 20) and (ymax > ymin + 20):  # allow 30% obscuration of existing labels
            sel_ind = random.randint(0, len(sample_labels) - 1)
            #print(len(sample_labels))
            #print(sel_ind)
            #print((xmax-xmin, ymax-ymin))
            #print(image[ymin:ymax, xmin:xmax].shape)
            #print([[sample_labels[sel_ind], *box]])
            #print(labels.shape)
            hs, ws, cs = sample_images[sel_ind].shape
            r_scale = min((ymax - ymin) / hs, (xmax - xmin) / ws)
            r_w = int(ws * r_scale)
            r_h = int(hs * r_scale)

            if (r_w > 10) and (r_h > 10):
                r_mask = cv2.resize(sample_masks[sel_ind], (r_w, r_h))
                r_image = cv2.resize(sample_images[sel_ind], (r_w, r_h))
                temp_crop = image[ymin:ymin + r_h, xmin:xmin + r_w]
                m_ind = r_mask > 0
                if m_ind.astype(int).sum() > 60:
                    temp_crop[m_ind] = r_image[m_ind]
                    #print(sample_labels[sel_ind])
                    #print(sample_images[sel_ind].shape)
                    #print(temp_crop.shape)
                    box = np.array([xmin, ymin, xmin + r_w, ymin + r_h], dtype=np.float32)
                    if len(labels):
                        labels = np.concatenate((labels, [[sample_labels[sel_ind], *box]]), 0)
                    else:
                        labels = np.array([[sample_labels[sel_ind], *box]])
                    image[ymin:ymin + r_h, xmin:xmin + r_w] = temp_crop

    return labels
class Albumentations:
    # YOLOv5 Albumentations class (optional, only used if package is installed)
    def __init__(self):
        self.transform = None
        try:
            import albumentations as A

            self.transform = A.Compose([
                A.CLAHE(p=0.01),
                A.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.01),
                A.RandomGamma(gamma_limit=[80, 120], p=0.01),
                A.Blur(p=0.01),
                A.MedianBlur(p=0.01),
                A.ToGray(p=0.01),
                A.ImageCompression(quality_lower=75, p=0.01)],
                bbox_params=A.BboxParams(format='pascal_voc', label_fields=['class_labels']))

            #logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
        except ImportError:  # package not installed, skip
            pass

    def __call__(self, im, labels, p=1.0):
        if self.transform and random.random() < p:
            new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])  # transformed
            im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
        return im, labels
def create_folder(path='./new'):
    # Create folder
    if os.path.exists(path):
        shutil.rmtree(path)  # delete output folder
    os.makedirs(path)  # make new output folder
def flatten_recursive(path='../coco'):
    # Flatten a recursive directory by bringing all files to top level
    new_path = Path(path + '_flat')
    create_folder(new_path)
    for file in tqdm(glob.glob(str(Path(path)) + '/**/*.*', recursive=True)):
        shutil.copyfile(file, new_path / Path(file).name)
def extract_boxes(path='../coco/'):  # from utils.datasets import *; extract_boxes('../coco128')
    # Convert detection dataset into classification dataset, with one directory per class

    path = Path(path)  # images dir
    shutil.rmtree(path / 'classifier') if (path / 'classifier').is_dir() else None  # remove existing
    files = list(path.rglob('*.*'))
    n = len(files)  # number of files
    for im_file in tqdm(files, total=n):
        if im_file.suffix[1:] in img_formats:
            # image
            im = cv2.imread(str(im_file))[..., ::-1]  # BGR to RGB
            h, w = im.shape[:2]

            # labels
            lb_file = Path(img2label_paths([str(im_file)])[0])
            if Path(lb_file).exists():
                with open(lb_file, 'r') as f:
                    lb = np.array([x.split() for x in f.read().strip().splitlines()], dtype=np.float32)  # labels

                for j, x in enumerate(lb):
                    c = int(x[0])  # class
                    f = (path / 'classifier') / f'{c}' / f'{path.stem}_{im_file.stem}_{j}.jpg'  # new filename
                    if not f.parent.is_dir():
                        f.parent.mkdir(parents=True)

                    b = x[1:] * [w, h, w, h]  # box
                    # b[2:] = b[2:].max()  # rectangle to square
                    b[2:] = b[2:] * 1.2 + 3  # pad
                    b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(int)

                    b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                    b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                    assert cv2.imwrite(str(f), im[b[1]:b[3], b[0]:b[2]]), f'box failure in {f}'
def autosplit(path='../coco', weights=(0.9, 0.1, 0.0), annotated_only=False):
    """ Autosplit a dataset into train/val/test splits and save path/autosplit_*.txt files
    Usage: from utils.datasets import *; autosplit('../coco')
    Arguments
        path:            Path to images directory
        weights:         Train, val, test weights (list)
        annotated_only:  Only use images with an annotated txt file
    """
    path = Path(path)  # images dir
    files = sum([list(path.rglob(f"*.{img_ext}")) for img_ext in img_formats], [])  # image files only
    n = len(files)  # number of files
    indices = random.choices([0, 1, 2], weights=weights, k=n)  # assign each image to a split

    txt = ['autosplit_train.txt', 'autosplit_val.txt', 'autosplit_test.txt']  # 3 txt files
    [(path / x).unlink() for x in txt if (path / x).exists()]  # remove existing

    print(f'Autosplitting images from {path}' + ', using *.txt labeled images only' * annotated_only)
    for i, img in tqdm(zip(indices, files), total=n):
        if not annotated_only or Path(img2label_paths([str(img)])[0]).exists():  # check label
            with open(path / txt[i], 'a') as f:
                f.write(str(img) + '\n')  # add image to txt file
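
# Example (a sketch; '../coco128/images' is a placeholder dataset path):
#   autosplit('../coco128/images', weights=(0.8, 0.2, 0.0))
# writes autosplit_train.txt / autosplit_val.txt next to the images, each listing ~80% / ~20%
# of the image paths, which can then be passed to LoadImagesAndLabels / create_dataloader.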
def load_segmentations(self, index):
    key = '/work/handsomejw66/coco17/' + self.img_files[index]
    #print(key)
    # /work/handsomejw66/coco17/
    return self.segs[key]