@@ -15,7 +15,8 @@ from models.common import *
 from models.experimental import *
 from models.yolo import *
 from utils.autoanchor import check_anchor_order
-from utils.general import make_divisible, check_file, set_logging
+from utils.general import make_divisible, check_file, set_logging, check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
+    scale_coords, xyxy2xywh, strip_optimizer, increment_path
 from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
     select_device, copy_attr
 from utils.loss import SigmoidBin
@@ -44,79 +45,121 @@ PATH_WEIGHT = './models/best.pt'
 img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')
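+# note: img_mean (the per-channel mean that used to be subtracted from the input)
+# is now unused, since the `scaled_img -= img_mean` line below is commented out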

-class Net():
+class Net:
     def __init__(self, device='cuda'):
         tstamp = time.time()
         self.device = select_device(device)
         print('[yolo] loading with', self.device)
-        self.net = Model('/root/helmet_det/yolov7-main/cfg/training/yolov7.yaml').to(self.device)
-        state_dict = torch.load(PATH_WEIGHT, map_location=self.device)['model'].state_dict()
-        self.net.load_state_dict(state_dict)
+        # self.net = Model('/root/helmet_det/yolov7-main/cfg/training/yolov7_custom.yaml').to(self.device)
+        # state_dict = torch.load(PATH_WEIGHT, map_location=self.device)['model'].state_dict()
+        self.net = attempt_load(PATH_WEIGHT, map_location=self.device)  # load the full checkpoint model onto self.device rather than hard-coding .cuda()
+        # self.net.load_state_dict(state_dict)
         self.net.eval()
         print('[yolo] finished loading (%.4f sec)' % (time.time() - tstamp))

     def detect_faces(self, image, conf_th=0.8, scales=[1]):
-        print(image, image.shape)
-        print(len(image), len(image.shape))
-        print('*'*30)
+
         w, h = image.shape[1], image.shape[0]
+        # print(w,h)
+        # pdb.set_trace()
         bboxes = np.empty(shape=(0, 5))

         with torch.no_grad():
             for s in scales:
+                # print(image, image.shape)
                 scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
-
                 scaled_img = np.swapaxes(scaled_img, 1, 2)
                 scaled_img = np.swapaxes(scaled_img, 1, 0)
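+                # the two swapaxes calls convert the HWC frame to CHW layout
+                # (equivalent to scaled_img.transpose(2, 0, 1)), matching the
+                # (B, C, H, W) input the model expects after unsqueeze below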
-                scaled_img = scaled_img[[2, 1, 0], :, :]
-                scaled_img = scaled_img.astype('float32')
-                scaled_img -= img_mean
-                scaled_img = scaled_img[[2, 1, 0], :, :]
-                x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
-                y = self.net(x)
+                # scaled_img = scaled_img[[2, 1, 0], :, :]
-                detections = y.data
+                scaled_img = scaled_img.astype('float32')
+                # scaled_img -= img_mean
+                # scaled_img = scaled_img[[2, 1, 0], :, :]
+                scaled_img /= 255.0  # yolov7's detect.py scales inputs to [0, 1]; raw 0-255 values degrade the predictions
+                x = torch.FloatTensor(scaled_img).unsqueeze(0).to(self.device)
+                # x = x.permute(0,3,1,2) # (B, W, H, C) --> (B, C, W, H)
+                # x = torch.from_numpy(scaled_img).to(self.device)
+                # pdb.set_trace()
+                y = self.net(x)[0]  # [0] selects the concatenated inference output of the Detect head
+                y = non_max_suppression(y, conf_thres=conf_th)  # pass conf_th through instead of silently using the default threshold
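+                # non_max_suppression returns a list with one (n, 6) tensor per
+                # image, rows formatted as [x1, y1, x2, y2, conf, cls]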
+
+                # Process detections
+                for i, det in enumerate(y):  # detections per image
+
+                    # gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+                    if len(det):
+                        # Rescale boxes from img_size to im0 size
+                        det[:, :4] = scale_coords(x.shape[2:], det[:, :4], image.shape).round()  # img0 shape must be the original image's (h, w), not the 4-D tensor shape
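+                        # one way to keep the function's Nx5 return contract: collect
+                        # each (x1, y1, x2, y2, score) row into bboxes, which the old
+                        # decoding loop below used to fill
+                        for *xyxy, conf, cls in det:
+                            bboxes = np.vstack((bboxes, (*(float(v) for v in xyxy), float(conf))))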
+
+                        # Print results
+                        for c in det[:, -1].unique():
+                            n = (det[:, -1] == c).sum()  # detections per class
+                            # s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                        # # Write results
+                        # for *xyxy, conf, cls in reversed(det):
+                        #     if save_txt:  # Write to file
+                        #         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                        #         line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
+                        #         with open(txt_path + '.txt', 'a') as f:
+                        #             f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                        #     if save_img or view_img:  # Add bbox to image
+                        #         label = f'{names[int(cls)]} {conf:.2f}'
+                        #         plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)
+
+                # Print time (inference + NMS)
+                # print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
+
+                # professor's code
+                # detections = y.data
+                detections = y  # currently unused; the decoding loop below is commented out
                 scale = torch.Tensor([w, h, w, h])
-
-                for i in range(detections.size(1)):
-                    j = 0
-                    while detections[0, i, j, 0] > conf_th:
-                        score = detections[0, i, j, 0]
-                        pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
-                        bbox = (pt[0], pt[1], pt[2], pt[3], score)
-                        bboxes = np.vstack((bboxes, bbox))
-                        j += 1
+
+                # for i in range(detections.size(1)):
+                #     j = 0
+                #     while detections[0, i, j, 0] > conf_th:
+                #         score = detections[0, i, j, 0]
+                #         pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
+                #         bbox = (pt[0], pt[1], pt[2], pt[3], score)
+                #         bboxes = np.vstack((bboxes, bbox))
+                #         j += 1

         # keep = nms_(bboxes, 0.1) ## nms?
         # bboxes = bboxes[keep]
-
         return bboxes
-
+        # return y


 def get_stream_video():
     # set up the camera
+
     cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')

     model = Net()
-
+    # print(model)
+    # pdb.set_trace()
     while True:
         # read a frame from the camera
-        success, frame = cam.read()
+        success, frame = cam.read()
         # print(frame)
         # print(type(frame))
         if not success:
             break
         else:
-            ret, buffer = cv2.imencode('.jpeg', frame)
             # convert the frame to bytes, then into a specific(?) format,
             # and hand them over one by one via yield

-            decode_img = cv2.imdecode(buffer, 1)
-            frame = model.detect_faces(decode_img).tobytes()
+            # ret, buffer = cv2.imencode('.jpeg', frame)
+            # decode_img = cv2.imdecode(buffer, 1)
+            # frame = cv2.imencode(model.detect_faces(buffer)).tobytes()
+            image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            image_np = cv2.resize(image_np, (640, 384))  # 640x384 keeps both sides divisible by the model stride of 32
+            # pdb.set_trace()
+            bboxes = model.detect_faces(image_np)
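+            # detect_faces returns an Nx5 box array, so its raw .tobytes() is not
+            # valid JPEG data for the stream. A minimal sketch of the missing step:
+            # draw the boxes, convert back to BGR, and JPEG-encode the frame.
+            for x1, y1, x2, y2, score in bboxes:
+                cv2.rectangle(image_np, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
+            ret, buffer = cv2.imencode('.jpeg', cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
+            frame = buffer.tobytes()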
+
             # frame = buffer.tobytes()
-            print(type(frame))
-            pdb.set_trace()
+            # print(type(frame))
+            # pdb.set_trace()
             yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(frame) + b'\r\n')
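+            # each yielded chunk is one part of a multipart/x-mixed-replace response;
+            # the '--frame' boundary must match the boundary declared in the
+            # Response's mimetype for the browser to render the MJPEG stream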

@@ -126,7 +169,7 @@ def index():

 # set the streaming endpoint to the /video path
-@app.get("/video")
+@app.route("/video", methods=['GET'])
 def video():
     # return a StringResponse,
     # specifying the "byte" image from OpenCV and its media type as arguments
@@ -137,8 +180,9 @@ def video():
 @app.route("/stream", methods=['GET'])
 def stream():
     print("here")
-    subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', 'best.pt'])
-    return "done"
+    result = subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', '/root/helmet_det/yolov7-main/models/best.pt'])
+    print(result)
+    return f"done (exit code {result.returncode})"  # a CompletedProcess is not a valid Flask response body
 # rtsp://astrodom:hdci12@192.168.170.73:554/stream1