import logging
import subprocess
import sys
import time

import cv2
import numpy as np
import torch
from flask import Flask, Response, render_template

sys.path.append('./')
logger = logging.getLogger(__name__)

from models.experimental import attempt_load
from utils.general import non_max_suppression, scale_coords
from utils.torch_utils import select_device

app = Flask(__name__)

PATH_WEIGHT = './models/best.pt'
RTSP_URL = 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1'

class Net:
    """Thin wrapper around the YOLOv7 model stored at PATH_WEIGHT."""

    def __init__(self, device=''):
        # An empty device string lets select_device pick CUDA when it is available.
        tstamp = time.time()
        self.device = select_device(device)
        print('[yolo] loading with', self.device)

        # Load the trained weights onto the selected device and switch to inference mode.
        self.net = attempt_load(PATH_WEIGHT, map_location=self.device)
        self.net.eval()
        print('[yolo] finished loading (%.4f sec)' % (time.time() - tstamp))

    def detect_faces(self, image, conf_th=0.8, scales=(1,)):
        """Run detection on an RGB image and return an (N, 5) array of
        [x1, y1, x2, y2, conf] boxes in the coordinate system of `image`."""
        bboxes = np.empty(shape=(0, 5))

        with torch.no_grad():
            for s in scales:
                # Resize for this scale, convert HWC -> CHW and normalise to [0, 1].
                scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
                scaled_img = scaled_img.transpose(2, 0, 1).astype('float32') / 255.0

                x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)

                # Forward pass followed by NMS at the requested confidence threshold.
                y = self.net(x)[0]
                y = non_max_suppression(y, conf_thres=conf_th)

                for det in y:
                    if len(det):
                        # Rescale boxes from network-input coordinates back to `image`.
                        det[:, :4] = scale_coords(x.shape[2:], det[:, :4], image.shape).round()
                        bboxes = np.vstack((bboxes, det[:, :5].cpu().numpy()))

        return bboxes

def get_stream_video():
    # Pull frames from the RTSP camera, run detection, draw the boxes and
    # yield each annotated frame as one part of an MJPEG stream.
    cam = cv2.VideoCapture(RTSP_URL)
    model = Net()

    while True:
        success, frame = cam.read()
        if not success:
            break

        # The detector expects RGB at a stride-friendly size; the camera delivers BGR.
        image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_np = cv2.resize(image_np, (640, 384))

        bboxes = model.detect_faces(image_np)

        # Draw the detections and encode the frame as JPEG for the multipart response.
        annotated = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
        for x1, y1, x2, y2, conf in bboxes:
            cv2.rectangle(annotated, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)

        ok, buffer = cv2.imencode('.jpg', annotated)
        if not ok:
            continue

        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')

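# Serve the viewer page rendered from templates/index.html.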
@app.route('/')
def index():
    return render_template('index.html')

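# /video streams MJPEG: each part yielded by the generator replaces the previous frame in the browser.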
- @app.route("/video", methods=['GET'])
- def video():
-
-
- return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
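# /stream runs the stock YOLOv7 detect.py as a subprocess instead of the in-process model above.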
- @app.route("/stream", methods=['GET'])
- def stream():
- print("here")
- result = subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', '/root/helmet_det/yolov7-main/models/best.pt'])
- print(result)
- return result
if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)