add_nms.py

import numpy as np
import onnx
from onnx import shape_inference
try:
    import onnx_graphsurgeon as gs
except Exception as e:
    print('Import onnx_graphsurgeon failure: %s' % e)

import logging

LOGGER = logging.getLogger(__name__)


class RegisterNMS(object):
    def __init__(
        self,
        onnx_model_path: str,
        precision: str = "fp32",
    ):
        self.graph = gs.import_onnx(onnx.load(onnx_model_path))
        assert self.graph
        LOGGER.info("ONNX graph created successfully")
        # Fold constants via ONNX-GS that PyTorch2ONNX may have missed
        self.graph.fold_constants()
        self.precision = precision
        self.batch_size = 1

    def infer(self):
        """
        Sanitize the graph by cleaning any unconnected nodes, doing a topological re-sort,
        and folding constant input values. When possible, run shape inference on the
        ONNX graph to determine tensor shapes.
        """
        for _ in range(3):
            count_before = len(self.graph.nodes)
            self.graph.cleanup().toposort()
            try:
                for node in self.graph.nodes:
                    for o in node.outputs:
                        o.shape = None
                model = gs.export_onnx(self.graph)
                model = shape_inference.infer_shapes(model)
                self.graph = gs.import_onnx(model)
            except Exception as e:
                LOGGER.info(f"Shape inference could not be performed at this time:\n{e}")
            try:
                self.graph.fold_constants(fold_shapes=True)
            except TypeError as e:
                LOGGER.error(
                    "This version of ONNX GraphSurgeon does not support folding shapes, "
                    f"please upgrade your onnx_graphsurgeon module. Error:\n{e}"
                )
                raise

            count_after = len(self.graph.nodes)
            if count_before == count_after:
                # No new folding occurred in this iteration, so we can stop for now.
                break

    def save(self, output_path):
        """
        Save the ONNX model to the given location.

        Args:
            output_path: Path pointing to the location where to write
                out the updated ONNX model.
        """
        self.graph.cleanup().toposort()
        model = gs.export_onnx(self.graph)
        onnx.save(model, output_path)
        LOGGER.info(f"Saved ONNX model to {output_path}")
    def register_nms(
        self,
        *,
        score_thresh: float = 0.25,
        nms_thresh: float = 0.45,
        detections_per_img: int = 100,
    ):
        """
        Register the ``EfficientNMS_TRT`` plugin node.

        NMS expects these shapes for its input tensors:
            - box_net: [batch_size, number_boxes, 4]
            - class_net: [batch_size, number_boxes, number_labels]

        Args:
            score_thresh (float): The scalar threshold for score (low scoring boxes are removed).
            nms_thresh (float): The scalar threshold for IOU (new boxes that have high IOU
                overlap with previously selected boxes are removed).
            detections_per_img (int): Number of best detections to keep after NMS.
        """
        self.infer()
        # The existing graph outputs (boxes and class scores) become the inputs of the NMS plugin node.
        op_inputs = self.graph.outputs
        op = "EfficientNMS_TRT"
        attrs = {
            "plugin_version": "1",
            "background_class": -1,  # no background class
            "max_output_boxes": detections_per_img,
            "score_threshold": score_thresh,
            "iou_threshold": nms_thresh,
            "score_activation": False,
            "box_coding": 0,  # boxes are encoded as corners: [x1, y1, x2, y2]
        }

        if self.precision == "fp32":
            dtype_output = np.float32
        elif self.precision == "fp16":
            dtype_output = np.float16
        else:
            raise NotImplementedError(f"Unsupported precision: {self.precision}")
        # NMS Outputs
        output_num_detections = gs.Variable(
            name="num_dets",
            dtype=np.int32,
            shape=[self.batch_size, 1],
        )  # A scalar indicating the number of valid detections per batch image.
        output_boxes = gs.Variable(
            name="det_boxes",
            dtype=dtype_output,
            shape=[self.batch_size, detections_per_img, 4],
        )
        output_scores = gs.Variable(
            name="det_scores",
            dtype=dtype_output,
            shape=[self.batch_size, detections_per_img],
        )
        output_labels = gs.Variable(
            name="det_classes",
            dtype=np.int32,
            shape=[self.batch_size, detections_per_img],
        )

        op_outputs = [output_num_detections, output_boxes, output_scores, output_labels]

        # Create the NMS Plugin node with the selected inputs. The outputs of the node will also
        # become the final outputs of the graph.
        self.graph.layer(op=op, name="batched_nms", inputs=op_inputs, outputs=op_outputs, attrs=attrs)
        LOGGER.info(f"Created NMS plugin '{op}' with attributes: {attrs}")

        self.graph.outputs = op_outputs
        self.infer()
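
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original module): a minimal
# example of how RegisterNMS is typically driven after a detector has been
# exported to ONNX. The file names "model.onnx" and "model_nms.onnx" are
# placeholders, not paths referenced by this file.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    nms = RegisterNMS("model.onnx", precision="fp32")
    # Append the EfficientNMS_TRT plugin so TensorRT runs NMS inside the engine.
    nms.register_nms(score_thresh=0.25, nms_thresh=0.45, detections_per_img=100)
    nms.save("model_nms.onnx")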