bridge_wrapper.py

'''
A module which binds the YOLOv7 repo with DeepSORT, with modifications.
'''
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # comment out this line to enable tensorflow logging outputs
import time
import tensorflow as tf

physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_memory_growth(physical_devices[0], True)

import cv2
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.compat.v1 import ConfigProto  # the official DeepSORT implementation uses tf1.x, so some modifications are needed to avoid errors

# deep sort imports
from deep_sort import preprocessing, nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker

# import from helpers
from tracking_helpers import read_class_names, create_box_encoder
from detection_helpers import *

# load configuration for object detector
config = ConfigProto()
config.gpu_options.allow_growth = True
class YOLOv7_DeepSORT:
    '''
    Class to wrap any YOLO-type detector with DeepSORT.
    '''
    def __init__(self, reID_model_path:str, detector, max_cosine_distance:float=0.4, nn_budget:float=None, nms_max_overlap:float=1.0,
                 coco_names_path:str="./io_data/input/classes/coco.names"):
        '''
        args:
            reID_model_path: Path of the re-identification model which generates the embeddings for the cropped detections
            detector: object of a YOLO model, or any model which gives you detections as [x1, y1, x2, y2, score, class]
            max_cosine_distance: Cosine distance threshold for "same" person matching
            nn_budget: If not None, fix samples per class to at most this number. Removes the oldest samples when the budget is reached.
            nms_max_overlap: Maximum NMS overlap allowed for the tracker
            coco_names_path: File which contains the COCO class names
        '''
        self.detector = detector
        self.coco_names_path = coco_names_path
        self.nms_max_overlap = nms_max_overlap
        self.class_names = read_class_names()

        # initialize deep sort
        self.encoder = create_box_encoder(reID_model_path, batch_size=1)
        metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)  # calculate cosine distance metric
        self.tracker = Tracker(metric)  # initialize tracker
    def track_video(self, video:str, output:str, skip_frames:int=0, show_live:bool=False, count_objects:bool=False, verbose:int=0):
        '''
        Track any given webcam or video.
        args:
            video: path to input video, or set to 0 for webcam
            output: path to output video
            skip_frames: Skip every nth frame. Note that the saved video will look choppy because of the skipped frames
            show_live: Whether to show live video tracking. Press the key 'q' to quit
            count_objects: count objects being tracked on screen
            verbose: print details on the screen; allowed values are 0, 1, 2
        '''
        try:  # begin video capture
            vid = cv2.VideoCapture(int(video))
        except:
            vid = cv2.VideoCapture(video)

        out = None
        if output:  # get video ready to save locally if flag is set
            width = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))  # by default VideoCapture returns float instead of int
            height = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = int(vid.get(cv2.CAP_PROP_FPS))
            codec = cv2.VideoWriter_fourcc(*"XVID")
            out = cv2.VideoWriter(output, codec, fps, (width, height))

        frame_num = 0
        while True:  # while video is running
            return_value, frame = vid.read()
            if not return_value:
                print('Video has ended or failed!')
                break
            frame_num += 1

            if skip_frames and not frame_num % skip_frames: continue  # skip every nth frame. When every frame is not important, this can speed up processing
            if verbose >= 1: start_time = time.time()

            # -----------------------------------------PUT ANY DETECTION MODEL HERE -----------------------------------------------------------------
            yolo_dets = self.detector.detect(frame.copy(), plot_bb=False)  # Get the detections
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

            if yolo_dets is None:
                bboxes = []
                scores = []
                classes = []
                num_objects = 0
            else:
                bboxes = yolo_dets[:, :4]
                bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]  # convert from xyxy to xywh
                bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
                scores = yolo_dets[:, 4]
                classes = yolo_dets[:, -1]
                num_objects = bboxes.shape[0]
            # ---------------------------------------- DETECTION PART COMPLETED ---------------------------------------------------------------------
            names = []
            for i in range(num_objects):  # loop through objects and use class index to get class name
                class_indx = int(classes[i])
                class_name = self.class_names[class_indx]
                names.append(class_name)

            names = np.array(names)
            count = len(names)

            if count_objects:
                cv2.putText(frame, "Objects being tracked: {}".format(count), (5, 35), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5, (0, 0, 0), 2)

            # ---------------------------------- DeepSORT tracker work starts here ------------------------------------------------------------
            features = self.encoder(frame, bboxes)  # encode detections and feed to tracker. [No of BB / detections per frame, embed_size]
            detections = [Detection(bbox, score, class_name, feature) for bbox, score, class_name, feature in zip(bboxes, scores, names, features)]  # [No of BB per frame] deep_sort.detection.Detection objects

            cmap = plt.get_cmap('tab20b')  # initialize color map
            colors = [cmap(i)[:3] for i in np.linspace(0, 1, 20)]

            boxs = np.array([d.tlwh for d in detections])  # run non-maxima suppression below
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.class_name for d in detections])
            indices = preprocessing.non_max_suppression(boxs, classes, self.nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            self.tracker.predict()  # Call the tracker
            self.tracker.update(detections)  # update using Kalman gain

            for track in self.tracker.tracks:  # update new findings AKA tracks
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                class_name = track.get_class()

                color = colors[int(track.track_id) % len(colors)]  # draw bbox on screen
                color = [i * 255 for i in color]
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1]-30)), (int(bbox[0])+(len(class_name)+len(str(track.track_id)))*17, int(bbox[1])), color, -1)
                cv2.putText(frame, class_name + " : " + str(track.track_id), (int(bbox[0]), int(bbox[1]-11)), 0, 0.6, (255, 255, 255), 1, lineType=cv2.LINE_AA)

                if verbose == 2:
                    print("Tracker ID: {}, Class: {}, BBox Coords (xmin, ymin, xmax, ymax): {}".format(str(track.track_id), class_name, (int(bbox[0]), int(bbox[1]), int(bbox[2]), int(bbox[3]))))
            # -------------------------------- Tracker work ENDS here -----------------------------------------------------------------------
            if verbose >= 1:
                fps = 1.0 / (time.time() - start_time)  # calculate frames per second of running detections
                if not count_objects: print(f"Processed frame no: {frame_num} || Current FPS: {round(fps, 2)}")
                else: print(f"Processed frame no: {frame_num} || Current FPS: {round(fps, 2)} || Objects tracked: {count}")

            result = np.asarray(frame)
            result = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

            if output: out.write(result)  # save output video

            if show_live:
                cv2.imshow("Output Video", result)
                if cv2.waitKey(1) & 0xFF == ord('q'): break

        cv2.destroyAllWindows()
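

# Example usage: a minimal sketch of how this wrapper is typically driven, not part of the
# original file. It assumes that detection_helpers exposes a Detector class with load_model()
# and a detect() method returning rows of [x1, y1, x2, y2, score, class], and that the
# DeepSORT re-ID weights and video paths shown below exist in your checkout; all of these
# names and paths are assumptions to adapt, not guarantees about the repo's API.
if __name__ == '__main__':
    detector = Detector()                       # hypothetical default construction; set thresholds/classes as your detection_helpers allows
    detector.load_model('./weights/yolov7.pt')  # hypothetical path to the YOLOv7 checkpoint

    # wrap the detector with DeepSORT; the re-ID model path below is an assumed location
    tracker = YOLOv7_DeepSORT(reID_model_path='./deep_sort/model_weights/mars-small128.pb',
                              detector=detector)

    # track a sample video and write the annotated result; paths are placeholders
    tracker.track_video('./io_data/input/video/street.mp4',
                        output='./io_data/output/street_tracked.avi',
                        show_live=False, count_objects=True, verbose=1)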