# detect.py

import argparse
import time
from pathlib import Path

import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
import matplotlib.pyplot as plt
from time import sleep
class frame_:
    def __init__(self):
        # Relies on the module-level `opt` parsed in the __main__ block below
        self.device = select_device(opt.device)
        self.model = attempt_load(opt.weights, map_location=self.device).half()  # load FP32 model, convert to FP16
        self.stride = int(self.model.stride.max())  # model stride
        self.imgsz = check_img_size(opt.img_size, s=self.stride)  # verify img_size is a multiple of stride
        self.x = LoadStreams(opt.source, img_size=self.imgsz, stride=self.stride)

    def inference(self):
        # LoadStreams is an iterator, not a tensor: run the model on the first batch it yields
        for path, img, im0s, vid_cap in self.x:
            img = torch.from_numpy(img).to(self.device).half() / 255.0  # uint8 -> fp16, 0-255 -> 0.0-1.0
            if img.ndimension() == 3:
                img = img.unsqueeze(0)
            y = self.model(img)
            print(y)
            return y
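# A minimal usage sketch for frame_ (hypothetical; assumes `opt` has already been
# parsed in the __main__ block below and that --source points at a live stream,
# e.g. a webcam index or an RTSP URL):
#
#     f = frame_()
#     pred = f.inference()  # raw model output for the first batch of frames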
def detect(save_img=False):
    source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
    save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://', 'https://'))

    # Directories
    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Initialize
    set_logging()
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size

    if trace:
        model = TracedModel(model, device, opt.img_size)

    if half:
        model.half()  # to FP16

    # Second-stage classifier
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load_state_dict returns key lists, not the model
        modelc.to(device).eval()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = check_imshow()
        print(f'view_img : {view_img}')
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
    old_img_w = old_img_h = imgsz
    old_img_b = 1

    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Warmup
        if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
            old_img_b = img.shape[0]
            old_img_h = img.shape[2]
            old_img_w = img.shape[3]
            for i in range(3):
                model(img, augment=opt.augment)[0]

        # Inference
        t1 = time_synchronized()
        with torch.no_grad():  # Calculating gradients would cause a GPU memory leak
            pred = model(img, augment=opt.augment)[0]
        t2 = time_synchronized()

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
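        # After NMS, pred is a list with one tensor per image in the batch; each row
        # is one detection in (x1, y1, x2, y2, conf, cls) order, still in img-size coordinates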
        t3 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
            else:
                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')
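                        # Each saved line reads 'cls x_center y_center width height [conf]',
                        # with box coordinates normalized to [0, 1] by the image size (gn)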
                    if save_img or view_img:  # Add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)

            # Print time (inference + NMS)
            print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
            # Stream results
            if view_img:
                cv2.imshow(str(p), im0)
                # plt.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond

            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                    print(f"The image with the result is saved in: {save_path}")
                else:  # 'video' or 'stream'
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer.write(im0)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        # print(f"Results saved to {save_dir}{s}")

    print(f'Done. ({time.time() - t0:.3f}s)')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='best.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--no-trace', action='store_true', help="don't trace model")
    parser.add_argument('--stream', action='store_true', help='serve results as a video stream')
    opt = parser.parse_args()
    # print(opt)
    # check_requirements(exclude=('pycocotools', 'thop'))
    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov7.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
    # if opt.stream:
    #     @app.get("/video")
    #     def video():
    #         # Return a StringResponse, passing the "byte" image grabbed from OpenCV
    #         # and declaring its mimetype explicitly
    #         return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
    # else:
    #     detect()
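# Example invocations (paths and sources are illustrative; substitute your own):
#
#     python detect.py --weights best.pt --source inference/images            # folder of images
#     python detect.py --weights best.pt --source 0 --view-img                # webcam 0, live display
#     python detect.py --weights best.pt --source rtsp://... --save-txt       # stream, save label files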