app.py

from importlib import import_module
import os
from flask import Flask, render_template, Response, request, send_file
import cv2
import subprocess
import time
import sys
import logging
import pdb

sys.path.append('./')  # so '$ python *.py' files in subdirectories can resolve imports
logger = logging.getLogger(__name__)

import numpy as np
import torch
from models.common import *
from models.experimental import *
from models.yolo import *
from utils.autoanchor import check_anchor_order
from utils.general import make_divisible, check_file, set_logging, check_img_size, \
    check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, increment_path
from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, \
    initialize_weights, select_device, copy_attr
from utils.loss import SigmoidBin
# import camera driver
# from object_detection import VideoStreaming
# if os.environ.get('CAMERA'):
#     Camera = import_module('camera_' + os.environ['CAMERA']).Camera
# else:
#     from camera import Camera

app = Flask(__name__)

# def gen(camera):
#     while True:
#         frame = VideoStreaming.get_frame()
#         # cv2.imencode('.jpg', frame)
#         yield (b'--frame\r\n'
#                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
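
# For reference, a driver selected by the commented-out import_module logic
# above only needs a get_frame() that returns one JPEG-encoded frame. A minimal
# sketch of such a driver (this class is illustrative; no camera_*.py module
# ships in this file):
class _OpenCVCamera:
    def __init__(self, source=0):
        self.cap = cv2.VideoCapture(source)

    def get_frame(self):
        ok, frame = self.cap.read()
        if not ok:
            return None
        ok, buf = cv2.imencode('.jpg', frame)
        return buf.tobytes() if ok else None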

PATH_WEIGHT = './models/best.pt'
# per-channel mean left over from an earlier preprocessing pipeline; unused below
img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')

class Net:
    def __init__(self, device='cuda'):
        tstamp = time.time()
        self.device = select_device(device)
        print('[yolo] loading with', self.device)
        # self.net = Model('/root/helmet_det/yolov7-main/cfg/training/yolov7_custom.yaml').to(self.device)
        # state_dict = torch.load(PATH_WEIGHT, map_location=self.device)['model'].state_dict()
        self.net = attempt_load(PATH_WEIGHT).to(self.device)  # honor the selected device instead of hard-coding .cuda()
        # self.net.load_state_dict(state_dict)
        self.net.eval()
        print('[yolo] finished loading (%.4f sec)' % (time.time() - tstamp))
    def detect_faces(self, image, conf_th=0.8, scales=[1]):
        w, h = image.shape[1], image.shape[0]  # original image width/height
        bboxes = np.empty(shape=(0, 5))
        with torch.no_grad():
            for s in scales:
                scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
                scaled_img = np.ascontiguousarray(scaled_img.transpose(2, 0, 1)).astype('float32')  # HWC -> CHW
                scaled_img /= 255.0  # YOLOv7 expects pixel values scaled to [0, 1]
                x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
                y = self.net(x)[0]
                y = non_max_suppression(y, conf_thres=conf_th)  # apply the confidence threshold here
                # Process detections
                for det in y:  # detections per image
                    if len(det):
                        # Rescale boxes from the inference size back to the original image size
                        det[:, :4] = scale_coords(x.shape[2:], det[:, :4], image.shape).round()
                        # Accumulate one (x1, y1, x2, y2, conf) row per detection
                        for *xyxy, conf, cls in reversed(det):
                            bbox = (float(xyxy[0]), float(xyxy[1]), float(xyxy[2]), float(xyxy[3]), float(conf))
                            bboxes = np.vstack((bboxes, bbox))
        return bboxes
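
# Minimal usage sketch for Net.detect_faces, kept out of the serving path. The
# image path 'sample.jpg' is an illustrative assumption, not a file in this repo:
def _detect_faces_demo(path='sample.jpg'):
    net = Net()
    img = cv2.cvtColor(cv2.imread(path), cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (640, 384))  # match the stride-divisible size the stream loop uses
    for x1, y1, x2, y2, conf in net.detect_faces(img, conf_th=0.5):
        print('box=(%d, %d, %d, %d) conf=%.2f' % (x1, y1, x2, y2, conf))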

def get_stream_video():
    # open the IP camera over RTSP
    cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
    model = Net()
    while True:
        # grab the next frame from the camera
        success, frame = cam.read()
        if not success:
            break
        else:
            # resize first so the detections line up with the frame we draw on
            frame = cv2.resize(frame, (640, 384))
            image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            # run detection and draw the resulting boxes on the BGR frame
            for x1, y1, x2, y2, conf in model.detect_faces(image_np):
                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
            # JPEG-encode the annotated frame and yield it one part at a time
            ret, buffer = cv2.imencode('.jpg', frame)
            if not ret:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
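
# The plain cv2.resize above stretches the frame to 640x384 and so distorts its
# aspect ratio. A letterbox resize (scale to fit, pad the rest), the approach
# YOLOv7's own dataloader takes, is sketched here as an alternative; it is not
# wired into the stream:
def _letterbox(img, new_shape=(384, 640), color=(114, 114, 114)):
    h, w = img.shape[:2]
    r = min(new_shape[0] / h, new_shape[1] / w)  # scale so the image fits inside new_shape
    nh, nw = int(round(h * r)), int(round(w * r))
    resized = cv2.resize(img, (nw, nh), interpolation=cv2.INTER_LINEAR)
    out = np.full((new_shape[0], new_shape[1], 3), color, dtype=resized.dtype)
    top = (new_shape[0] - nh) // 2
    left = (new_shape[1] - nw) // 2
    out[top:top + nh, left:left + nw] = resized
    return out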

@app.route('/')
def index():
    return render_template('index.html')

# serve the stream at the /video route
@app.route("/video", methods=['GET'])
def video():
    # return a streaming Response whose body is the JPEG parts produced by
    # get_stream_video(), with the multipart mimetype
    return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
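
# Browsers render this kind of stream when it is embedded as an image; the
# template would reference it roughly like this (illustrative snippet only,
# the actual templates/index.html is not shown in this file):
#     <img src="{{ url_for('video') }}" alt="live stream">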

# open the IP camera by shelling out to YOLOv7's detect.py
@app.route("/stream", methods=['GET'])
def stream():
    print("here")
    result = subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py',
                             '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1',
                             '--weights', '/root/helmet_det/yolov7-main/models/best.pt'],
                            capture_output=True, text=True)
    print(result)
    # a CompletedProcess is not a valid Flask response, so return its output instead
    return Response(result.stdout, mimetype='text/plain')

# rtsp://astrodom:hdci12@192.168.170.73:554/stream1
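# quick check from a shell (assumes the server is running locally):
#     curl http://localhost:5000/stream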

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)