
feat: web streaming

yjlim, 2 years ago
Parent commit 2eebc23773

+ 81 - 37
yolov7-main/app.py

@@ -15,7 +15,8 @@ from models.common import *
 from models.experimental import *
 from models.yolo import *
 from utils.autoanchor import check_anchor_order
-from utils.general import make_divisible, check_file, set_logging
+from utils.general import make_divisible, check_file, set_logging, check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
+    scale_coords, xyxy2xywh, strip_optimizer, increment_path
 from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
     select_device, copy_attr
 from utils.loss import SigmoidBin
@@ -44,79 +45,121 @@ PATH_WEIGHT = './models/best.pt'
 img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')
 
 
-class Net():
+class Net:
     def __init__(self, device='cuda'):
         tstamp = time.time()
         self.device = select_device(device)
         print('[yolo] loading with', self.device)
-        self.net = Model('/root/helmet_det/yolov7-main/cfg/training/yolov7.yaml').to(self.device)
-        state_dict = torch.load(PATH_WEIGHT, map_location=self.device)['model'].state_dict()
-        self.net.load_state_dict(state_dict)
+        # self.net = Model('/root/helmet_det/yolov7-main/cfg/training/yolov7_custom.yaml').to(self.device)
+        # state_dict = torch.load(PATH_WEIGHT, map_location=self.device)['model'].state_dict()
+        self.net = attempt_load(PATH_WEIGHT).cuda()
+        # self.net.load_state_dict(state_dict)
         self.net.eval()
         print('[yolo] finished loading (%.4f sec)' % (time.time() - tstamp))
 
     def detect_faces(self, image, conf_th=0.8, scales=[1]):
-        print(image, image.shape)
-        print(len(image), len(image.shape))
-        print('*'*30)
+        
         w, h = image.shape[1], image.shape[0]
+        # print(w,h)
+        # pdb.set_trace()
         bboxes = np.empty(shape=(0, 5))
 
         with torch.no_grad():
             for s in scales:
+                # print(image, image.shape)
                 scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
-
                 scaled_img = np.swapaxes(scaled_img, 1, 2)
                 scaled_img = np.swapaxes(scaled_img, 1, 0)
-                scaled_img = scaled_img[[2, 1, 0], :, :]
-                scaled_img = scaled_img.astype('float32')
-                scaled_img -= img_mean
-                scaled_img = scaled_img[[2, 1, 0], :, :]
-                x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
-                y = self.net(x)
+                # scaled_img = scaled_img[[2, 1, 0], :, :]
 
-                detections = y.data
+                scaled_img = scaled_img.astype('float32')
+                # scaled_img -= img_mean
+                # scaled_img = scaled_img[[2, 1, 0], :, :]
+                x = torch.FloatTensor(scaled_img).unsqueeze(0).to(self.device)
+                #x = x.permute(0,3,1,2) # (B, W, H, C) --> (B, C, W, H)
+                # x = torch.from_numpy(scaled_img).to(self.device)
+                # pdb.set_trace()
+                y = self.net(x)[0]
+                y = non_max_suppression(y)
+                
+                # Process detections
+                for i, det in enumerate(y):  # detections per image
+    
+                    # gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+                    if len(det):
+                        # Rescale boxes from img_size to im0 size
+                        det[:, :4] = scale_coords(x.shape[2:], det[:, :4], x.shape).round()
+
+                        # Print results
+                        for c in det[:, -1].unique():
+                            n = (det[:, -1] == c).sum()  # detections per class
+                            # s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                        # # Write results
+                        # for *xyxy, conf, cls in reversed(det):
+                        #     if save_txt:  # Write to file
+                        #         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                        #         line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
+                        #         with open(txt_path + '.txt', 'a') as f:
+                        #             f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                        #     if save_img or view_img:  # Add bbox to image
+                        #         label = f'{names[int(cls)]} {conf:.2f}'
+                        #         plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)
+                        
+                    # Print time (inference + NMS)
+                    # print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
+
+                # professor's code
+                # detections = y.data
+                detections = y
                 scale = torch.Tensor([w, h, w, h])
-
-                for i in range(detections.size(1)):
-                    j = 0
-                    while detections[0, i, j, 0] > conf_th:
-                        score = detections[0, i, j, 0]
-                        pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
-                        bbox = (pt[0], pt[1], pt[2], pt[3], score)
-                        bboxes = np.vstack((bboxes, bbox))
-                        j += 1
+                
+                # for i in range(detections.size(1)):
+                #     j = 0
+                #     while detections[0, i, j, 0] > conf_th:
+                #         score = detections[0, i, j, 0]
+                #         pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
+                #         bbox = (pt[0], pt[1], pt[2], pt[3], score)
+                #         bboxes = np.vstack((bboxes, bbox))
+                #         j += 1
 
             # keep = nms_(bboxes, 0.1) ## nms?
             # bboxes = bboxes[keep]
-
         return bboxes
-
+        # return y
 
 
 def get_stream_video():
     # define the camera
+    
     cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
 
     model = Net()    
-
+    # print(model)
+    # pdb.set_trace()
     while True:
         # read a frame from the camera
-        success, frame = cam.read()
+        success, frame = cam.read() 
         # print(frame)
         # print(type(frame))
         if not success:
             break
         else:
-            ret, buffer = cv2.imencode('.jpeg', frame)
             # convert the frame to bytes in the multipart format, then
             # yield the frames one at a time.
 
-            decode_img = cv2.imdecode(buffer, 1)
-            frame = model.detect_faces(decode_img).tobytes()
+            # ret, buffer = cv2.imencode('.jpeg', frame)
+            # decode_img = cv2.imdecode(buffer, 1)
+            # frame = cv2.imencode(model.detect_faces(buffer)).tobytes()
+            image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+            image_np = cv2.resize(image_np, (640, 384))
+            # pdb.set_trace()
+            frame = model.detect_faces(image_np).tobytes()
+            
             # frame = buffer.tobytes()
-            print(type(frame))
-            pdb.set_trace()
+            # print(type(frame))
+            # pdb.set_trace()
             yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(frame) + b'\r\n')
 
 
@@ -126,7 +169,7 @@ def index():
 
 
 # Set the streaming endpoint to the /video path.
-@app.get("/video")
+@app.route("/video", methods=['GET'])
 def video():
     # Return a Response whose arguments are the byte-encoded images
     # from OpenCV and the multipart content type.
@@ -137,8 +180,9 @@ def video():
 @app.route("/stream", methods=['GET'])
 def stream():
     print("here")
-    subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', 'best.pt'])
-    return "done"
+    result = subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', '/root/helmet_det/yolov7-main/models/best.pt'])
+    print(result)
+    return str(result)
 # rtsp://astrodom:hdci12@192.168.170.73:554/stream1
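
Note on the /video path in app.py: as committed, get_stream_video() yields the raw ndarray returned by detect_faces() (currently an empty (0, 5) array, since the box-collection loop is commented out) under a Content-Type of image/jpeg, so the browser has nothing to render. Below is a minimal sketch, not part of this commit, of a generator that draws the boxes and streams JPEG-encoded frames; it assumes detect_faces() is restored to return (N, 5) rows of x1, y1, x2, y2, score in pixel coordinates of the resized image.

    def get_annotated_stream(source='rtsp://astrodom:hdci12@192.168.170.73:554/stream1'):
        # Hypothetical replacement for get_stream_video(); names are placeholders.
        cam = cv2.VideoCapture(source)
        model = Net()
        while True:
            success, frame = cam.read()
            if not success:
                break
            frame = cv2.resize(frame, (640, 384))      # match the size fed to the model
            rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            bboxes = model.detect_faces(rgb)           # assumed (N, 5): x1, y1, x2, y2, score
            for x1, y1, x2, y2, score in bboxes:
                cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
            ok, buf = cv2.imencode('.jpg', frame)      # encode the annotated frame as JPEG
            if not ok:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buf.tobytes() + b'\r\n')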
 
 

+ 110 - 61
yolov7-main/app_.py

@@ -1,76 +1,125 @@
+#!/usr/bin/env python
 from importlib import import_module
 import os
-from flask import Flask, render_template, Response, request, send_file
+from flask import Flask, render_template, Response
 import cv2
-import subprocess
-# import camera driver
-# from object_detection import VideoStreaming
-from detect import frame_
-
-# if os.environ.get('CAMERA'):
-#     Camera = import_module('camera_' + os.environ['CAMERA']).Camera
-# else:
-#     from camera import Camera
-
+import torch
+from utils.torch_utils import *
+from utils.general import check_img_size, non_max_suppression, scale_coords
+from models.experimental import attempt_load
+from models.yolo import Model
+from utils.datasets import LoadStreams, LoadImages
+import random
+from utils.plots import plot_one_box
 
 app = Flask(__name__)
 
-
-# def gen(camera):
-
-#     while True:
-#         frame = VideoStreaming.get_frame()
-#         # cv2.imencode('.jpg', frame)
-
-#         yield (b'--frame\r\n'
-#                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
-
-
-def get_stream_video():
-    # define the camera
-    cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
-
-    while True:
-        # read a frame from the camera
-        # f = frame_()
-        # print(f.inference())
-        
-        success, frame = cam.read()
-        # print(frame)
-        # print(type(frame))
-        if not success:
-            break
-        else:
-            ret, buffer = cv2.imencode('.jpeg', frame)
-            # convert the frame to bytes in the multipart format, then
-            # yield the frames one at a time.
-            frame = buffer.tobytes()
-            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
-               bytearray(frame) + b'\r\n')
-
-
 @app.route('/')
 def index():
+    """Video streaming home page."""
     return render_template('index.html')
 
 
-# Set the streaming endpoint to the /video path.
-@app.get("/video")
-def video():
-    # Return a Response whose arguments are the byte-encoded images
-    # from OpenCV and the multipart content type.
-    return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
-
-
-# ipcam 열기
-@app.route("/stream", methods=['GET'])
 def stream():
-    print("here")
-    subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', 'best.pt'])
-    return "done"
-# rtsp://astrodom:hdci12@192.168.170.73:554/stream1
+    # WEIGHT = 'runs/train/yolov7-bsw2/weights/best.pt'
+    WEIGHT = '/root/Public/model_ssai_fine_20221209/ipark_1208_1305.pt'
+    # model = torch.load_state_dict(WEIGHT, map_location=select_device('0'))
+    model = attempt_load(WEIGHT, map_location='cuda:0').half()
+    # model = TracedModel(model, select_device('0'), img_size=640).half()
+
+    imgsz = 320
+    stride = int(model.stride.max())  # model stride
+    imgsz = check_img_size(imgsz, s=stride)  # check img_size
+    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+
+    # Run inference
+    model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
+    # Get names and colors
+    names = model.module.names if hasattr(model, 'module') else model.names
+    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]
+    old_img_w = old_img_h = imgsz
+    old_img_b = 1
+
+    """Video streaming generator function."""
+    # cap = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
+    dataset = LoadStreams('rtsp://astrodom:hdci12@192.168.170.73:554/stream1', img_size=imgsz, stride=stride)
 
+    while True:
+        # ret, frame = cap.read()
+
+        # if not ret:
+        #     print('error')
+        #     break
+
+        for path, img, im0s, vid_cap in dataset:
+            img = torch.from_numpy(img).to(device)
+            img = img.half() if True else img.float()  # uint8 to fp16/32
+            img /= 255.0  # 0 - 255 to 0.0 - 1.0
+            if img.ndimension() == 3:
+                img = img.unsqueeze(0)
+
+            # Warmup
+            if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
+                old_img_b = img.shape[0]
+                old_img_h = img.shape[2]
+                old_img_w = img.shape[3]
+                for i in range(3):
+                    model(img, augment=True)[0]
+
+            with torch.no_grad():   # Calculating gradients would cause a GPU memory leak
+                pred = model(img, augment=True)[0]
+            # print(pred)
+            # print('*' * 30)
+
+            # Apply NMS
+            pred = non_max_suppression(pred, 0.40, 0.45)
+
+
+            # Process detections
+            for i, det in enumerate(pred):  # detections per image
+                if True:  # batch_size >= 1
+                    p, s, im0, frame = path[i], '%g: ' % i, im0s[i].copy(), dataset.count
+                # else:
+                #     p, s, im0, frame = path, '', im0s, getattr(dataset, 'frame', 0)
+
+                p = Path(p)  # to Path
+                # save_path = str(save_dir / p.name)  # img.jpg
+                # txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}')  # img.txt
+                # gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  # normalization gain whwh
+                if len(det):
+                    # Rescale boxes from img_size to im0 size
+                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+
+                    # Print results
+                    for c in det[:, -1].unique():
+                        n = (det[:, -1] == c).sum()  # detections per class
+                        s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "  # add to string
+
+                    # Write results
+                    for *xyxy, conf, cls in reversed(det):
+                    #     if save_txt:  # Write to file
+                    #         xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
+                    #         line = (cls, *xywh, conf) if opt.save_conf else (cls, *xywh)  # label format
+                    #         with open(txt_path + '.txt', 'a') as f:
+                    #             f.write(('%g ' * len(line)).rstrip() % line + '\n')
+
+                        if True:  # Add bbox to image
+                            label = f'{names[int(cls)]} {conf:.2f}'
+                            plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)
+
+
+
+                # x = img.permute(0,3,1,2) # (B, W, H, C) --> (B, C, W, H)
+                image_bytes = cv2.imencode('.jpg', im0)[1].tobytes()
+                yield (b'--frame\r\n'
+                    b'Content-Type: image/jpeg\r\n\r\n' + image_bytes + b'\r\n')
+
+
+@app.route('/video')
+def video():
+    """Video streaming route. Put this in the src attribute of an img tag."""
+    return Response(stream(), content_type='multipart/x-mixed-replace; boundary=frame')
 
 
-if __name__ == '__main__':
-    app.run(host='0.0.0.0', debug=True)
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', threaded=True, debug=True)
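
The /video route in app_.py serves a multipart/x-mixed-replace MJPEG stream, so any MJPEG-capable client can display it. A small sketch of a consumer, assuming the app is reachable at localhost:5000 and that the local OpenCV/FFmpeg build can read multipart MJPEG over HTTP (both are assumptions to verify):

    import cv2

    # Hypothetical client for the Flask stream; URL and port are placeholders.
    cap = cv2.VideoCapture('http://localhost:5000/video')
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        cv2.imshow('helmet_det stream', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):  # quit on 'q'
            break
    cap.release()
    cv2.destroyAllWindows()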

+ 104 - 0
yolov7-main/base_camera.py

@@ -0,0 +1,104 @@
+import time
+import threading
+try:
+    from greenlet import getcurrent as get_ident
+except ImportError:
+    try:
+        from threading import get_ident
+    except ImportError:
+        from _thread import get_ident
+
+
+class CameraEvent(object):
+    """An Event-like class that signals all active clients when a new frame is
+    available.
+    """
+    def __init__(self):
+        self.events = {}
+
+    def wait(self):
+        """Invoked from each client's thread to wait for the next frame."""
+        ident = get_ident()
+        if ident not in self.events:
+            # this is a new client
+            # add an entry for it in the self.events dict
+            # each entry has two elements, a threading.Event() and a timestamp
+            self.events[ident] = [threading.Event(), time.time()]
+        return self.events[ident][0].wait()
+
+    def set(self):
+        """Invoked by the camera thread when a new frame is available."""
+        now = time.time()
+        remove = None
+        for ident, event in self.events.items():
+            if not event[0].isSet():
+                # if this client's event is not set, then set it
+                # also update the last set timestamp to now
+                event[0].set()
+                event[1] = now
+            else:
+                # if the client's event is already set, it means the client
+                # did not process a previous frame
+                # if the event stays set for more than 5 seconds, then assume
+                # the client is gone and remove it
+                if now - event[1] > 5:
+                    remove = ident
+        if remove:
+            del self.events[remove]
+
+    def clear(self):
+        """Invoked from each client's thread after a frame was processed."""
+        self.events[get_ident()][0].clear()
+
+
+class BaseCamera(object):
+    thread = None  # background thread that reads frames from camera
+    frame = None  # current frame is stored here by background thread
+    last_access = 0  # time of last client access to the camera
+    event = CameraEvent()
+
+    def __init__(self):
+        """Start the background camera thread if it isn't running yet."""
+        if BaseCamera.thread is None:
+            BaseCamera.last_access = time.time()
+
+            # start background frame thread
+            BaseCamera.thread = threading.Thread(target=self._thread)
+            BaseCamera.thread.start()
+
+            # wait until frames are available
+            while self.get_frame() is None:
+                time.sleep(0)
+
+    def get_frame(self):
+        """Return the current camera frame."""
+        BaseCamera.last_access = time.time()
+
+        # wait for a signal from the camera thread
+        BaseCamera.event.wait()
+        BaseCamera.event.clear()
+
+        return BaseCamera.frame
+
+    @staticmethod
+    def frames():
+        """"Generator that returns frames from the camera."""
+        raise RuntimeError('Must be implemented by subclasses.')
+
+    @classmethod
+    def _thread(cls):
+        """Camera background thread."""
+        print('Starting camera thread.')
+        frames_iterator = cls.frames()
+        for frame in frames_iterator:
+            BaseCamera.frame = frame
+            BaseCamera.event.set()  # send signal to clients
+            time.sleep(0)
+
+            # if there hasn't been any clients asking for frames in
+            # the last 10 seconds then stop the thread
+            if time.time() - BaseCamera.last_access > 10:
+                frames_iterator.close()
+                print('Stopping camera thread due to inactivity.')
+                break
+        BaseCamera.thread = None
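
BaseCamera.frames() is abstract and must be overridden with a generator that yields encoded frames; the background thread stores whatever it yields in BaseCamera.frame, which get_frame() hands to each client. camera.py below does exactly this with the YOLOv7 pipeline; a minimal OpenCV-only sketch of the same contract (the webcam source is an assumption):

    import cv2
    from base_camera import BaseCamera

    class SimpleCamera(BaseCamera):
        """Example subclass: yields raw JPEG frames with no detection."""
        video_source = 0  # assumption: default webcam; camera.py uses the RTSP URL instead

        @staticmethod
        def frames():
            cap = cv2.VideoCapture(SimpleCamera.video_source)
            if not cap.isOpened():
                raise RuntimeError('Could not open video source.')
            while True:
                ok, img = cap.read()
                if not ok:
                    break
                # Each yielded value becomes BaseCamera.frame via _thread().
                yield cv2.imencode('.jpg', img)[1].tobytes()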

+ 144 - 0
yolov7-main/camera.py

@@ -0,0 +1,144 @@
+import os
+import cv2
+from base_camera import BaseCamera
+import torch
+import torch.nn as nn
+import torchvision
+import numpy as np
+import argparse
+from utils.datasets import *
+from utils.plots import *
+from utils.general import *
+from utils.torch_utils import *
+
+
+def time_synchronized():
+    # pytorch-accurate time
+    if torch.cuda.is_available():
+        torch.cuda.synchronize()
+    return time.time()
+
+
+def select_device(device='', batch_size=None):
+    # device = 'cpu' or '0' or '0,1,2,3'
+    s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} '  # string
+    cpu = device.lower() == 'cpu'
+    if cpu:
+        os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
+    elif device:  # non-cpu device requested
+        # os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
+        assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
+
+    cuda = not cpu and torch.cuda.is_available()
+    if cuda:
+        n = torch.cuda.device_count()
+        if n > 1 and batch_size:  # check that batch_size is compatible with device_count
+            assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'
+        space = ' ' * len(s)
+        for i, d in enumerate(device.split(',') if device else range(n)):
+            p = torch.cuda.get_device_properties(i)
+            s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n"  # bytes to MB
+    else:
+        s += 'CPU\n'
+
+    logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s)  # emoji-safe
+    return torch.device('cuda:0' if cuda else 'cpu')
+
+
+class Camera(BaseCamera):
+    video_source = 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1'
+
+    def __init__(self):
+        if os.environ.get('OPENCV_CAMERA_SOURCE'):
+            Camera.set_video_source(int(os.environ['OPENCV_CAMERA_SOURCE']))
+        super(Camera, self).__init__()
+
+    @staticmethod
+    def set_video_source(source):
+        Camera.video_source = source
+
+    @staticmethod
+    def frames():
+        out, weights, imgsz = \
+        'inference/output', 'models/best.pt', 640
+        source = 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1'
+        # device = torch_utils.select_device()
+        device = select_device()
+        if os.path.exists(out):
+            shutil.rmtree(out)  # delete output folder
+        os.makedirs(out)  # make new output folder
+
+        # Load model
+        # google_utils.attempt_download(weights)
+        model = torch.load(weights, map_location=device)['model']
+        
+        model.to(device).eval()
+
+        # # Second-stage classifier
+        # classify = False
+        # if classify:
+        #     modelc = torch_utils.load_classifier(name='resnet101', n=2)  # initialize
+        #     modelc.load_state_dict(torch.load('weights/resnet101.pt', map_location=device)['model'])  # load weights
+        #     modelc.to(device).eval()
+
+        # Half precision
+        half = True and device.type != 'cpu' 
+        print('half = ' + str(half))
+
+        if half:
+            model.half()
+        
+
+        # Set Dataloader
+        vid_path, vid_writer = None, None
+        dataset = LoadStreams(source, img_size=imgsz)
+        #dataset = LoadStreams(source, img_size=imgsz)
+        names = model.names if hasattr(model, 'names') else model.modules.names
+        colors = [[random.randint(0, 255) for _ in range(3)] for _ in range(len(names))]
+
+        # Run inference
+        t0 = time.time()
+        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
+        _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
+        for path, img, im0s, vid_cap in dataset:
+            img = torch.from_numpy(img).to(device)
+            img = img.half() if half else img.float()  # uint8 to fp16/32
+            img /= 255.0  # 0 - 255 to 0.0 - 1.0
+            if img.ndimension() == 3:
+                img = img.unsqueeze(0)
+
+            # Inference
+            # t1 = torch_utils.time_synchronized()
+            t1 = time_synchronized()
+            pred = model(img, augment=False)[0]
+            
+            # Apply NMS
+            pred = non_max_suppression(pred, 0.4, 0.5, classes=None, agnostic=False)
+            # t2 = torch_utils.time_synchronized()
+            t2 = time_synchronized()
+
+            # # Apply Classifier
+            # if classify:
+            #     pred = apply_classifier(pred, modelc, img, im0s)
+
+            for i, det in enumerate(pred):  # detections per image
+                p, s, im0 = path, '', im0s
+
+                # save_path = str(Path(out) / Path(p).name)
+                s += '%gx%g ' % img.shape[2:]  # print string
+                gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]  #  normalization gain whwh
+                if det is not None and len(det):
+                    # Rescale boxes from img_size to im0 size
+                    det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()
+                    
+                    #for c in det[:, -1].unique():  #probably error with torch 1.5
+                    for c in det[:, -1].detach().unique():
+                        n = (det[:, -1] == c).sum()  # detections per class
+                        s += '%g %s, ' % (n, names[int(c)])  # add to string
+                        
+                    for *xyxy, conf, cls in det:
+                        label = '%s %.2f' % (names[int(cls)], conf)
+                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)
+                print('%sDone. (%.3fs)' % (s, t2 - t1))
+ 
+            yield cv2.imencode('.jpg', im0)[1].tobytes()
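
camera.py is not wired into either Flask app in this commit; the gen(camera) pattern that app_.py previously had commented out is the intended glue. A sketch of that wiring, with the route name and run options as assumptions:

    from flask import Flask, Response
    from camera import Camera

    app = Flask(__name__)

    def gen(camera):
        # Pull frames that BaseCamera's background thread keeps up to date.
        while True:
            frame = camera.get_frame()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

    @app.route('/video')
    def video():
        return Response(gen(Camera()),
                        mimetype='multipart/x-mixed-replace; boundary=frame')

    if __name__ == '__main__':
        app.run(host='0.0.0.0', threaded=True)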

+ 1 - 1
yolov7-main/cfg/deploy/yolov7x.yaml

@@ -1,5 +1,5 @@
 # parameters
-nc: 80  # number of classes
+nc: 3  # number of classes
 depth_multiple: 1.0  # model depth multiple
 width_multiple: 1.0  # layer channel multiple
 

+ 1 - 1
yolov7-main/data/hyp.scratch.custom.yaml

@@ -2,7 +2,7 @@ lr0: 0.01  # initial learning rate (SGD=1E-2, Adam=1E-3)
 lrf: 0.1  # final OneCycleLR learning rate (lr0 * lrf)
 momentum: 0.937  # SGD momentum/Adam beta1
 weight_decay: 0.0005  # optimizer weight decay 5e-4
-warmup_epochs: 3.0  # warmup epochs (fractions ok)
+warmup_epochs: 1  # warmup epochs (fractions ok)
 warmup_momentum: 0.8  # warmup initial momentum
 warmup_bias_lr: 0.1  # warmup initial bias lr
 box: 0.05  # box loss gain

+ 10 - 4
yolov7-main/detect.py

@@ -1,7 +1,7 @@
 import argparse
 import time
 from pathlib import Path
-
+from collections import deque
 import cv2
 import torch
 import torch.backends.cudnn as cudnn
@@ -83,7 +83,9 @@ def detect(save_img=False):
         model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))  # run once
     old_img_w = old_img_h = imgsz
     old_img_b = 1
-
+    
+    import pdb
+    queue = deque()
     t0 = time.time()
     for path, img, im0s, vid_cap in dataset:
         img = torch.from_numpy(img).to(device)
@@ -91,6 +93,8 @@ def detect(save_img=False):
         img /= 255.0  # 0 - 255 to 0.0 - 1.0
         if img.ndimension() == 3:
             img = img.unsqueeze(0)
+        print(img, img.shape)
+        pdb.set_trace()
 
         # Warmup
         if device.type != 'cpu' and (old_img_b != img.shape[0] or old_img_h != img.shape[2] or old_img_w != img.shape[3]):
@@ -99,13 +103,14 @@ def detect(save_img=False):
             old_img_w = img.shape[3]
             for i in range(3):
                 model(img, augment=opt.augment)[0]
-
+        print(opt.augment)
+        pdb.set_trace()
         # Inference
         t1 = time_synchronized()
         with torch.no_grad():   # Calculating gradients would cause a GPU memory leak
             pred = model(img, augment=opt.augment)[0]
         t2 = time_synchronized()
-
+        
         # Apply NMS
         pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres, classes=opt.classes, agnostic=opt.agnostic_nms)
         t3 = time_synchronized()
@@ -150,6 +155,7 @@ def detect(save_img=False):
             print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
 
             # Stream results
+            queue.append(im0)
             if view_img:
                 cv2.imshow(str(p), im0)
                 # plt.imshow(str(p), im0)
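
The deque added to detect.py collects every annotated frame (queue.append(im0)) but nothing consumes it yet. If detect() were run in a background thread, one way to drain it into the same multipart format used by the Flask apps is sketched below; this is purely illustrative and not something detect.py does in this commit.

    import time
    from collections import deque

    import cv2

    frame_queue = deque(maxlen=30)  # hypothetical shared buffer; detect() would append im0 here

    def mjpeg_from_queue():
        # Yield multipart JPEG parts from whatever frames the detector has queued.
        while True:
            if not frame_queue:
                time.sleep(0.01)  # a real implementation would wait on a condition/event
                continue
            im0 = frame_queue.popleft()
            ok, buf = cv2.imencode('.jpg', im0)
            if ok:
                yield (b'--frame\r\n'
                       b'Content-Type: image/jpeg\r\n\r\n' + buf.tobytes() + b'\r\n')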

+ 280 - 1
yolov7-main/hi.ipynb

@@ -18,6 +18,285 @@
     "#print(a)"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "tensor([[[2, 0, 4],\n",
+       "         [1, 3, 2],\n",
+       "         [1, 2, 3]],\n",
+       "\n",
+       "        [[4, 4, 0],\n",
+       "         [2, 2, 0],\n",
+       "         [1, 2, 0]],\n",
+       "\n",
+       "        [[4, 1, 0],\n",
+       "         [4, 0, 2],\n",
+       "         [4, 4, 1]]])"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import torch\n",
+    "from utils.torch_utils import select_device\n",
+    "\n",
+    "select_device"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "torch.Size([3, 3, 3]) 4\n",
+      "tensor([[[4, 1, 0],\n",
+      "         [4, 0, 2],\n",
+      "         [4, 4, 1]],\n",
+      "\n",
+      "        [[4, 4, 0],\n",
+      "         [2, 2, 0],\n",
+      "         [1, 2, 0]],\n",
+      "\n",
+      "        [[2, 0, 4],\n",
+      "         [1, 3, 2],\n",
+      "         [1, 2, 3]]])\n"
+     ]
+    }
+   ],
+   "source": [
+    "a = a[[2, 1, 0], :, :]\n",
+    "print(a.shape, 4)\n",
+    "print(a)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/html": [
+       "<div>\n",
+       "<style scoped>\n",
+       "    .dataframe tbody tr th:only-of-type {\n",
+       "        vertical-align: middle;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe tbody tr th {\n",
+       "        vertical-align: top;\n",
+       "    }\n",
+       "\n",
+       "    .dataframe thead th {\n",
+       "        text-align: right;\n",
+       "    }\n",
+       "</style>\n",
+       "<table border=\"1\" class=\"dataframe\">\n",
+       "  <thead>\n",
+       "    <tr style=\"text-align: right;\">\n",
+       "      <th></th>\n",
+       "      <th>a</th>\n",
+       "      <th>b</th>\n",
+       "      <th>c</th>\n",
+       "      <th>d</th>\n",
+       "    </tr>\n",
+       "  </thead>\n",
+       "  <tbody>\n",
+       "    <tr>\n",
+       "      <th>0</th>\n",
+       "      <td>46</td>\n",
+       "      <td>37</td>\n",
+       "      <td>22</td>\n",
+       "      <td>8</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>1</th>\n",
+       "      <td>19</td>\n",
+       "      <td>38</td>\n",
+       "      <td>11</td>\n",
+       "      <td>10</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>2</th>\n",
+       "      <td>15</td>\n",
+       "      <td>22</td>\n",
+       "      <td>40</td>\n",
+       "      <td>30</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>3</th>\n",
+       "      <td>5</td>\n",
+       "      <td>31</td>\n",
+       "      <td>34</td>\n",
+       "      <td>45</td>\n",
+       "    </tr>\n",
+       "    <tr>\n",
+       "      <th>4</th>\n",
+       "      <td>46</td>\n",
+       "      <td>47</td>\n",
+       "      <td>2</td>\n",
+       "      <td>46</td>\n",
+       "    </tr>\n",
+       "  </tbody>\n",
+       "</table>\n",
+       "</div>"
+      ],
+      "text/plain": [
+       "    a   b   c   d\n",
+       "0  46  37  22   8\n",
+       "1  19  38  11  10\n",
+       "2  15  22  40  30\n",
+       "3   5  31  34  45\n",
+       "4  46  47   2  46"
+      ]
+     },
+     "execution_count": 3,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "df = pd.DataFrame(np.random.randint(0, 50, size=(5000000, 4)), columns=('a','b','c','d'))\n",
+    "df.shape\n",
+    "# (5000000, 5)\n",
+    "df.head()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "451.1593863964081\n"
+     ]
+    }
+   ],
+   "source": [
+    "import time \n",
+    "start = time.time() # iterrows for idx, row in df.iterrows() \n",
+    "for idx, row in df.iterrows():\n",
+    "    if row.a == 0 :         \n",
+    "        df.at[idx, 'e' ] = row.d     \n",
+    "    elif ( row.a <= 25 ) and (row.a > 0 ):         \n",
+    "        df.at[idx, 'e' ] = (row.b)-(row.c)     \n",
+    "    else :         \n",
+    "        df.at[idx, 'e' ] = row.b + row.c \n",
+    "end = time.time()\n",
+    "print (end - start) ### 걸린 시간: 177초\n",
+    "\n",
+    "    \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "0.16010785102844238\n"
+     ]
+    }
+   ],
+   "source": [
+    "# using vectorization \n",
+    "\n",
+    "start = time.time()\n",
+    "df['e'] = df['b'] + df['c']\n",
+    "df.loc[df['a'] <= 25, 'e'] = df['b'] -df['c']\n",
+    "df.loc[df['a']==0, 'e'] = df['d']\n",
+    "end = time.time()\n",
+    "print(end - start)\n",
+    "## 0.28007707595825195 sec"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "A\n",
+      "B\n",
+      "C\n"
+     ]
+    }
+   ],
+   "source": [
+    "import time\n",
+    "\n",
+    "def yield_abc():\n",
+    "  for ch in \"ABC\":\n",
+    "    time.sleep(1)\n",
+    "    yield ch\n",
+    "\n",
+    "for ch in yield_abc():\n",
+    "    print(ch)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "False"
+      ]
+     },
+     "execution_count": 9,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "False and device.type != 'cpu' \n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "False"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "False and True"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
@@ -42,7 +321,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.8.13"
+   "version": "3.8.13 | packaged by conda-forge | (default, Mar 25 2022, 06:04:10) \n[GCC 10.3.0]"
   },
   "orig_nbformat": 4,
   "vscode": {

+ 3 - 3
yolov7-main/models/experimental.py

@@ -248,10 +248,10 @@ def attempt_load(weights, map_location=None):
     for w in weights if isinstance(weights, list) else [weights]:
         # attempt_download(w)
         ckpt = torch.load(w, map_location=map_location)  # load
-        print(ckpt.keys())
-        print(ckpt['state_dict'])
+        # print(ckpt.keys())
+        # print(ckpt['state_dict'])
         model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model
-        pdb.set_trace()
+        # pdb.set_trace()
 
     # Compatibility updates
     for m in model.modules():

+ 4 - 1
yolov7-main/templates/index.html

@@ -1,9 +1,12 @@
 <html>
   <head>
+    <meta charset="UTF-8">
+    <meta http-equiv="X-UA-Compatible" content="IE=edge">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
     <title>Video Streaming Test</title>
   </head>
   <body>
     <h1>Video Streaming Test</h1>
-    <img src="{{ url_for('video') }}">
+    <img src="{{ url_for('video') }}" alt="yolov7">
   </body>
 </html>