tracking_helpers.py

# vim: expandtab:ts=4:sw=4
import os
import errno
import argparse
import numpy as np
import cv2
import tensorflow.compat.v1 as tf
import torch

# tf.compat.v1.disable_eager_execution()

# Pin TensorFlow to the last visible GPU and let it allocate memory on demand.
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
    tf.config.experimental.set_visible_devices(physical_devices[-1], 'GPU')
    tf.config.experimental.set_memory_growth(physical_devices[-1], True)
# physical_devices = torch.cuda.device_count()
# if physical_devices > 0:
#     os.environ["CUDA_VISIBLE_DEVICES"] = "3"
class Dummy:
    def __init__(self, video: str, output: str = "./io_data/output/output.avi",
                 coco_names_path: str = "./io_data/input/classes/coco.names",
                 output_format: str = 'XVID', iou: float = 0.45, score: float = 0.5,
                 dont_show: bool = False, count: bool = False):
        '''
        args:
            video: path to the input video, or 0 for webcam
            output: path to the output video
            output_format: FourCC codec for the output video
            iou: IOU threshold
            score: matching score threshold
            dont_show: do not show video output
            count: count objects being tracked on screen
            coco_names_path: file which contains the COCO class names
        '''
        self.video = video
        self.output = output
        self.output_format = output_format
        self.count = count
        self.iou = iou
        self.dont_show = dont_show
        self.score = score
        self.coco_names_path = coco_names_path
def _run_in_batches(f, data_dict, out, batch_size):
    """Apply `f` to `data_dict` in slices of `batch_size`, writing into `out`."""
    data_len = len(out)
    num_batches = int(data_len / batch_size)

    s, e = 0, 0
    for i in range(num_batches):
        s, e = i * batch_size, (i + 1) * batch_size
        batch_data_dict = {k: v[s:e] for k, v in data_dict.items()}
        out[s:e] = f(batch_data_dict)
    if e < len(out):
        # handle the remainder that does not fill a complete batch
        batch_data_dict = {k: v[e:] for k, v in data_dict.items()}
        out[e:] = f(batch_data_dict)
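# A minimal sketch of the batching behaviour above, with made-up shapes
# (`my_model` is a hypothetical callable, not part of this module):
#
#   out = np.zeros((10, 128), np.float32)
#   _run_in_batches(
#       lambda feed: my_model(feed["x"]),
#       {"x": np.random.rand(10, 64)},
#       out, batch_size=4)
#   # processes slices [0:4] and [4:8], then the remainder [8:10]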
def extract_image_patch(image, bbox, patch_shape):
    """Extract image patch from bounding box.

    Parameters
    ----------
    image : ndarray
        The full image.
    bbox : array_like
        The bounding box in format (x, y, width, height).
    patch_shape : Optional[array_like]
        This parameter can be used to enforce a desired patch shape
        (height, width). First, the `bbox` is adapted to the aspect ratio
        of the patch shape, then it is clipped at the image boundaries.
        If None, the shape is computed from :arg:`bbox`.

    Returns
    -------
    ndarray | NoneType
        An image patch showing the :arg:`bbox`, optionally reshaped to
        :arg:`patch_shape`. Returns None if the bounding box is empty or
        fully outside of the image boundaries.

    """
    bbox = np.array(bbox)
    if patch_shape is not None:
        # correct aspect ratio to patch shape
        target_aspect = float(patch_shape[1]) / patch_shape[0]
        new_width = target_aspect * bbox[3]
        bbox[0] -= (new_width - bbox[2]) / 2
        bbox[2] = new_width

    # convert to top left, bottom right
    bbox[2:] += bbox[:2]
    bbox = bbox.astype(int)  # np.int was removed in NumPy 1.24

    # clip at image boundaries
    bbox[:2] = np.maximum(0, bbox[:2])
    bbox[2:] = np.minimum(np.asarray(image.shape[:2][::-1]) - 1, bbox[2:])
    if np.any(bbox[:2] >= bbox[2:]):
        return None
    sx, sy, ex, ey = bbox
    image = image[sy:ey, sx:ex]
    if patch_shape is not None:
        # only resize when a target shape was requested; with patch_shape=None
        # the raw crop is returned, as the docstring promises
        image = cv2.resize(image, tuple(patch_shape[::-1]))
    return image
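# Hedged usage sketch (the path and box values are illustrative only):
#
#   frame = cv2.imread("frame.jpg")
#   patch = extract_image_patch(frame, (50, 80, 64, 128), patch_shape=(128, 64))
#   # patch is a 128x64 BGR crop, or None if the box fell outside the frame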
class ImageEncoder(object):

    def __init__(self, checkpoint_filename, input_name="images",
                 output_name="features"):
        self.session = tf.Session()
        with tf.gfile.GFile(checkpoint_filename, "rb") as file_handle:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(file_handle.read())
        tf.import_graph_def(graph_def, name="net")
        # the graph is imported under the "net" scope, so tensor names
        # carry a "net/" prefix
        self.input_var = tf.get_default_graph().get_tensor_by_name(
            "net/%s:0" % input_name)
        self.output_var = tf.get_default_graph().get_tensor_by_name(
            "net/%s:0" % output_name)

        assert len(self.output_var.get_shape()) == 2
        assert len(self.input_var.get_shape()) == 4
        self.feature_dim = self.output_var.get_shape().as_list()[-1]
        self.image_shape = self.input_var.get_shape().as_list()[1:]

    def __call__(self, data_x, batch_size=32):
        out = np.zeros((len(data_x), self.feature_dim), np.float32)
        _run_in_batches(
            lambda x: self.session.run(self.output_var, feed_dict=x),
            {self.input_var: data_x}, out, batch_size)
        return out
def create_box_encoder(model_filename, input_name="images",
                       output_name="features", batch_size=32):
    image_encoder = ImageEncoder(model_filename, input_name, output_name)
    image_shape = image_encoder.image_shape

    def encoder(image, boxes):
        image_patches = []
        for box in boxes:
            patch = extract_image_patch(image, box, image_shape[:2])
            if patch is None:
                print("WARNING: Failed to extract image patch: %s." % str(box))
                patch = np.random.uniform(
                    0., 255., image_shape).astype(np.uint8)
            image_patches.append(patch)
        image_patches = np.asarray(image_patches)
        return image_encoder(image_patches, batch_size)
    return encoder
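# Hedged usage sketch; the model path is this file's default, `bgr_frame`
# and the box are made up:
#
#   encoder = create_box_encoder("resources/networks/mars-small128.pb")
#   features = encoder(bgr_frame, [(50, 80, 64, 128)])   # one (x, y, w, h) box
#   # features has shape (1, 128) for the mars-small128 model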
def generate_detections(encoder, mot_dir, output_dir, detection_dir=None):
    """Generate detections with features.

    Parameters
    ----------
    encoder : Callable[image, ndarray] -> ndarray
        The encoder function takes as input a BGR color image and a matrix of
        bounding boxes in format `(x, y, w, h)` and returns a matrix of
        corresponding feature vectors.
    mot_dir : str
        Path to the MOTChallenge directory (can be either train or test).
    output_dir
        Path to the output directory. Will be created if it does not exist.
    detection_dir
        Path to custom detections. The directory structure should be the default
        MOTChallenge structure: `[sequence]/det/det.txt`. If None, uses the
        standard MOTChallenge detections.

    """
    if detection_dir is None:
        detection_dir = mot_dir
    try:
        os.makedirs(output_dir)
    except OSError as exception:
        if exception.errno == errno.EEXIST and os.path.isdir(output_dir):
            pass
        else:
            raise ValueError(
                "Failed to create output directory '%s'" % output_dir)

    for sequence in os.listdir(mot_dir):
        print("Processing %s" % sequence)
        sequence_dir = os.path.join(mot_dir, sequence)

        image_dir = os.path.join(sequence_dir, "img1")
        image_filenames = {
            int(os.path.splitext(f)[0]): os.path.join(image_dir, f)
            for f in os.listdir(image_dir)}

        detection_file = os.path.join(
            detection_dir, sequence, "det/det.txt")
        detections_in = np.loadtxt(detection_file, delimiter=',')
        detections_out = []

        frame_indices = detections_in[:, 0].astype(int)  # np.int was removed in NumPy 1.24
        min_frame_idx = frame_indices.min()
        max_frame_idx = frame_indices.max()
        for frame_idx in range(min_frame_idx, max_frame_idx + 1):
            print("Frame %05d/%05d" % (frame_idx, max_frame_idx))
            mask = frame_indices == frame_idx
            rows = detections_in[mask]

            if frame_idx not in image_filenames:
                print("WARNING could not find image for frame %d" % frame_idx)
                continue
            bgr_image = cv2.imread(
                image_filenames[frame_idx], cv2.IMREAD_COLOR)
            features = encoder(bgr_image, rows[:, 2:6].copy())
            detections_out += [np.r_[(row, feature)] for row, feature
                               in zip(rows, features)]

        output_filename = os.path.join(output_dir, "%s.npy" % sequence)
        np.save(
            output_filename, np.asarray(detections_out), allow_pickle=False)
def parse_args():
    """Parse command line arguments.
    """
    parser = argparse.ArgumentParser(description="Re-ID feature extractor")
    parser.add_argument(
        "--model",
        default="resources/networks/mars-small128.pb",
        help="Path to frozen inference graph protobuf.")
    parser.add_argument(
        "--mot_dir", help="Path to MOTChallenge directory (train or test)",
        required=True)
    parser.add_argument(
        "--detection_dir", help="Path to custom detections. Defaults to "
        "standard MOT detections. Directory structure should be the default "
        "MOTChallenge structure: [sequence]/det/det.txt", default=None)
    parser.add_argument(
        "--output_dir", help="Output directory. Will be created if it does not"
        " exist.", default="detections")
    return parser.parse_args()
def main():
    args = parse_args()
    encoder = create_box_encoder(args.model, batch_size=32)
    generate_detections(encoder, args.mot_dir, args.output_dir,
                        args.detection_dir)
def read_class_names():
    '''
    Read the COCO class names.
    '''
    classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
               'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
               'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
               'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
               'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
               'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
               'hair drier', 'toothbrush']
    return dict(zip(range(len(classes)), classes))
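# Hedged usage sketch:
#
#   names = read_class_names()
#   # names[0] == 'person', names[2] == 'car'; keys are the 80 COCO class ids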
if __name__ == "__main__":
    main()