# detect.py

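"""YOLOv7-style inference script.

Runs object detection on images, videos, directories, webcams, or
RTSP/RTMP/HTTP streams and writes annotated results under runs/detect/exp*.

Example invocations (a sketch based on the argparse flags defined below):
    python detect.py --weights best.pt --source inference/images --img-size 640
    python detect.py --source 0 --view-img  # webcam
"""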
import argparse
import time
from pathlib import Path
from collections import deque

import cv2
import torch
import torch.backends.cudnn as cudnn
from numpy import random

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
import matplotlib.pyplot as plt
from time import sleep
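
# NOTE: experimental wrapper; nothing else in this script instantiates frame_.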
class frame_:
    def __init__(self):
        self.model = attempt_load(opt.weights, map_location=opt.device).half()  # load FP32 model, convert to FP16
        self.stride = int(self.model.stride.max())  # model stride
        self.imgsz = check_img_size(opt.img_size, s=self.stride)  # check img_size
        self.x = LoadStreams(opt.source, img_size=self.imgsz, stride=self.stride)

    def inference(self):
        y = self.model(self.x)
        print(y)
        return y

def detect(save_img=False):
    source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
    save_img = not opt.nosave and not source.endswith('.txt')  # save inference images
    webcam = source.isnumeric() or source.endswith('.txt') or source.lower().startswith(
        ('rtsp://', 'rtmp://', 'http://', 'https://'))

    # Directories
    save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok))  # increment run
    (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

    # Initialize
    set_logging()
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size

    if trace:
        model = TracedModel(model, device, opt.img_size)

    if half:
        model.half()  # to FP16
    # Second-stage classifier
    classify = False
    if classify:
        modelc = load_classifier(name='resnet101', n=2)  # initialize
        # load_state_dict() returns a result object, not the module, so it cannot be chained
        modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])
        modelc.to(device).eval()
    # Set Dataloader
    vid_path, vid_writer = None, None
    if webcam:
        view_img = check_imshow()
        print(f'view_img : {view_img}')
        cudnn.benchmark = True  # set True to speed up constant image size inference
        dataset = LoadStreams(source, img_size=imgsz, stride=stride)
    else:
        dataset = LoadImages(source, img_size=imgsz, stride=stride)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Run inference
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
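    # Cache the most recent input shape; the warmup block below reruns whenever it changes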
    old_img_w = old_img_h = imgsz
    old_img_b = 1

    # import pdb  # debugging leftover; re-enable together with the pdb.set_trace() calls below
    queue = deque()  # buffer of annotated frames (see the commented-out streaming endpoint at the bottom)
    t0 = time.time()
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)
        # print(img, img.shape)  # debugging leftover
        # pdb.set_trace()        # debugging leftover
        # Warmup
        if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
            old_img_b = img.shape[0]
            old_img_h = img.shape[2]
            old_img_w = img.shape[3]
            for i in range(3):
                model(img, augment=opt.augment)[0]
                # print(opt.augment)  # debugging leftover
                # pdb.set_trace()     # debugging leftover
        # Inference
        t1 = time_synchronized()
        with torch.no_grad():  # Calculating gradients would cause a GPU memory leak
            pred = model(img, augment=opt.augment)[0]
        t2 = time_synchronized()

        # Apply NMS
        pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
        t3 = time_synchronized()

        # Apply Classifier
        if classify:
            pred = apply_classifier(pred, modelc, img, im0s)
        # Process detections
        for i, det in enumerate(pred):  # detections per image
            if webcam:  # batch_size >= 1
                p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
            else:
                p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)

            p = Path(p)  # to Path
            save_path = str(save_dir / p.name)  # img.jpg
            txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Print results
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or view_img:  # Add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)

            # Print time (inference + NMS)
            print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
            # Stream results
            queue.append(im0)
            if view_img:
                cv2.imshow(str(p), im0)
                # plt.imshow(str(p), im0)
                cv2.waitKey(1)  # 1 millisecond
            # Save results (image with detections)
            if save_img:
                if dataset.mode == 'image':
                    cv2.imwrite(save_path, im0)
                    print(f"The image with the result is saved in: {save_path}")
                else:  # 'video' or 'stream'
                    if vid_path != save_path:  # new video
                        vid_path = save_path
                        if isinstance(vid_writer, cv2.VideoWriter):
                            vid_writer.release()  # release previous video writer
                        if vid_cap:  # video
                            fps = vid_cap.get(cv2.CAP_PROP_FPS)
                            w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                            h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                        else:  # stream
                            fps, w, h = 30, im0.shape[1], im0.shape[0]
                            save_path += '.mp4'
                        vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
                    vid_writer.write(im0)
    if save_txt or save_img:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        # print(f"Results saved to {save_dir}{s}")

    print(f'Done. ({time.time() - t0:.3f}s)')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='best.pt', help='model.pt path(s)')
    parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.45, help='IOU threshold for NMS')
    parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    parser.add_argument('--view-img', action='store_true', help='display results')
    parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
    parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
    parser.add_argument('--nosave', action='store_true', help='do not save images/videos')
    parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
    parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
    parser.add_argument('--augment', action='store_true', help='augmented inference')
    parser.add_argument('--update', action='store_true', help='update all models')
    parser.add_argument('--project', default='runs/detect', help='save results to project/name')
    parser.add_argument('--name', default='exp', help='save results to project/name')
    parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
    parser.add_argument('--no-trace', action='store_true', help="don't trace model")
    parser.add_argument('--stream', action='store_true', help='stream results over HTTP (see the commented-out endpoint below)')
    opt = parser.parse_args()
    # print(opt)
    # check_requirements(exclude=('pycocotools', 'thop'))

    with torch.no_grad():
        if opt.update:  # update all models (to fix SourceChangeWarning)
            for opt.weights in ['yolov7.pt']:
                detect()
                strip_optimizer(opt.weights)
        else:
            detect()
    # if opt.stream:
    #     @app.get("/video")
    #     def video():
    #         # Return a Response whose body is the "byte" image stream produced
    #         # by OpenCV, with the multipart mimetype declared explicitly
    #         return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
    # else:
    #     detect()
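
    # A minimal sketch of the frame generator the commented-out endpoint above
    # assumes. Nothing below exists in this file: `get_stream_video` and the idea
    # of draining the `queue` deque filled in detect() are assumptions, not the
    # author's code.
    #
    # def get_stream_video():
    #     while queue:
    #         frame = queue.popleft()                # oldest annotated frame first
    #         ok, jpg = cv2.imencode('.jpg', frame)  # encode BGR frame as JPEG bytes
    #         if not ok:
    #             continue
    #         yield (b'--frame\r\n'
    #                b'Content-Type: image/jpeg\r\n\r\n' + jpg.tobytes() + b'\r\n')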