yjlim 2 years ago
parent
commit
8040d2035c

+ 2 - 0
.gitignore

@@ -0,0 +1,2 @@
+*.MP4
+*.mp4

+ 8 - 2
README.md

@@ -28,9 +28,15 @@ If you use CUDA, check nvidia-smi in the terminal and pick an empty de
 
 
 # train
-`python train.py --workers 8 --device 0 --batch-size 32 --data data/coco_copy.yaml --img 640 640 --cfg cfg/training/yolov7_custom.yaml --weights '/root/Public/pretrained/yolov7_training.pt' --name yolov7-helmet_det --hyp data/hyp.scratch.custom.yaml --epochs 20`
+```
+python train.py --workers 8 --device 0 --batch-size 32 --data data/coco_copy.yaml --img 640 640 --cfg cfg/training/yolov7_custom.yaml --weights '/root/Public/pretrained/yolov7_training.pt' --name yolov7-helmet_det --hyp data/hyp.scratch.custom.yaml --epochs 20
+```
 
 Refer to the `args` in `train.py`, change them as you like, and compare the training results.
 - If you modify a cfg/*.yaml file, do not change the number of classes. The classes are currently {0: helmet, 1: person, 2: head}, so nc is 3 (a quick sanity check is sketched after this file's diff).
 - Keeping the image size fixed at 640*640 (the YOLO pretraining image size) should also make training results easier to compare.
-Training results can be viewed at `https://wandb.ai/wsangbae/helmet-det/runs/2tbbj4db?workspace=user-wsangbae`.
+Training results can be viewed at `https://wandb.ai/wsangbae/helmet-det/runs/2tbbj4db?workspace=user-wsangbae`.
+
+
+# dependency
+On Ubuntu:
+`apt-get install libgtk2.0-dev`
+`apt-get install pkg-config`
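Since the README warns against changing the class count, a quick sanity check may help. This is a hedged sketch (PyYAML is already in the repo's requirements; the cfg path is illustrative):

```python
# Hypothetical sanity check: confirm nc in a training cfg still matches
# the {0: helmet, 1: person, 2: head} mapping described in the README.
import yaml

CFG = 'cfg/training/yolov7_custom.yaml'  # illustrative path

with open(CFG) as f:
    cfg = yaml.safe_load(f)

assert cfg['nc'] == 3, f"nc must stay 3 (helmet/person/head), got {cfg['nc']}"
print('nc OK:', cfg['nc'])
```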

BIN
web/__pycache__/camera.cpython-38.pyc


BIN
web/__pycache__/object_detection.cpython-38.pyc


+ 83 - 0
web/app.py

@@ -0,0 +1,83 @@
+from importlib import import_module
+import os
+from flask import Flask, render_template, Response, request, send_file
+import cv2
+import subprocess
+import time
+import sys
+# import camera driver
+# from object_detection import VideoStreaming
+
+if os.environ.get('CAMERA'):
+    Camera = import_module('camera_' + os.environ['CAMERA']).Camera
+else:
+    from camera import Camera
+
+
+app = Flask(__name__)
+
+
+# def gen(camera):
+#     while True:
+#         frame = VideoStreaming.get_frame()
+#         # cv2.imencode('.jpg', frame)
+
+#         yield (b'--frame\r\n'
+#                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
+class Model():
+    def __init__(self, device='cuda'):
+        tstamp = time.time()
+        self.device = device
+        # unused stub here; the full model wrapper lives in yolov7-main/app.py (Net)
+        self.net = None
+
+
+def get_stream_video():
+    # open the camera (RTSP stream)
+    cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
+
+    
+
+    while True:
+        # read one frame from the camera
+        success, frame = cam.read()
+        # print(frame)
+        # print(type(frame))
+        if not success:
+            break
+        else:
+            ret, buffer = cv2.imencode('.jpeg', frame)
+            # convert the frame to bytes, then yield it one chunk
+            # at a time in multipart (MJPEG) format
+            frame = buffer.tobytes()
+            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(frame) + b'\r\n')
+
+
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+
+# serve the stream at the /video endpoint.
+@app.get("/video")
+def video():
+    # return a streaming Response whose body is the byte-encoded
+    # OpenCV frames, with the multipart mimetype specified
+    return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
+
+
+# open the IP camera
+@app.route("/stream", methods=['GET'])
+def stream():
+    print("here")
+    subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', 'best.pt'])
+    return "done"
+# rtsp://astrodom:hdci12@192.168.170.73:554/stream1
+
+
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', debug=True)
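For reference, the multipart MJPEG stream served at `/video` can be read back directly with OpenCV. A minimal consumer sketch, assuming the app runs locally on Flask's default port 5000:

```python
# Minimal consumer sketch for the MJPEG stream served at /video.
# The host/port are assumptions (Flask defaults); adjust as needed.
import cv2

cap = cv2.VideoCapture('http://127.0.0.1:5000/video')
while True:
    ok, frame = cap.read()
    if not ok:
        break
    cv2.imshow('stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
```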

BIN
web/best.pt


+ 19 - 0
web/camera.py

@@ -0,0 +1,19 @@
+from time import time
+import glob
+import os
+
+PATH = '/root/astrodom/jpeg/*'
+
+class Camera(object):
+    def __init__(self):
+        # file_list = glob.glob(PATH)
+        files = sorted(glob.glob(PATH), key=os.path.getctime)
+        file_list = [file for file in files if file.endswith(".jpeg")]
+
+        # print(file_list_py)
+        self.frames = [open(f, 'rb').read() for f in file_list]
+        print(len(self.frames))
+    
+    def get_frame(self):
+        return self.frames[int(time()) % len(self.frames)]

+ 104 - 0
web/object_detection.py

@@ -0,0 +1,104 @@
+import cv2
+import time
+import os
+import numpy as np
+
+
+
+# class VideoStreaming(object):
+#     def __init__(self):
+#         super(VideoStreaming, self).__init__()
+#         self.VIDEO = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
+
+#         self.MODEL = ObjectDetection()
+
+#         self._preview = True
+#         self._flipH = False
+#         self._detect = False
+#         self._exposure = self.VIDEO.get(cv2.CAP_PROP_EXPOSURE)
+#         self._contrast = self.VIDEO.get(cv2.CAP_PROP_CONTRAST)
+
+#     @property
+#     def preview(self):
+#         return self._preview
+
+#     @preview.setter
+#     def preview(self, value):
+#         self._preview = bool(value)
+
+#     @property
+#     def flipH(self):
+#         return self._flipH
+
+#     @flipH.setter
+#     def flipH(self, value):
+#         self._flipH = bool(value)
+
+#     @property
+#     def detect(self):
+#         return self._detect
+
+#     @detect.setter
+#     def detect(self, value):
+#         self._detect = bool(value)
+
+#     @property
+#     def exposure(self):
+#         return self._exposure
+
+#     @exposure.setter
+#     def exposure(self, value):
+#         self._exposure = value
+#         self.VIDEO.set(cv2.CAP_PROP_EXPOSURE, self._exposure)
+
+#     @property
+#     def contrast(self):
+#         return self._contrast
+
+#     @contrast.setter
+#     def contrast(self, value):
+#         self._contrast = value
+#         self.VIDEO.set(cv2.CAP_PROP_CONTRAST, self._contrast)
+
+#     def show(self):
+#         while(self.VIDEO.isOpened()):
+#             ret, snap = self.VIDEO.read()
+#             if self.flipH:
+#                 snap = cv2.flip(snap, 1)
+
+#             if ret == True:
+#                 if self._preview:
+#                     # snap = cv2.resize(snap, (0, 0), fx=0.5, fy=0.5)
+#                     if self.detect:
+#                         snap = self.MODEL.detectObj(snap)
+
+#                 else:
+#                     snap = np.zeros((
+#                         int(self.VIDEO.get(cv2.CAP_PROP_FRAME_HEIGHT)),
+#                         int(self.VIDEO.get(cv2.CAP_PROP_FRAME_WIDTH))
+#                     ), np.uint8)
+#                     label = "camera disabled"
+#                     H, W = snap.shape
+#                     font = cv2.FONT_HERSHEY_PLAIN
+#                     color = (255, 255, 255)
+#                     cv2.putText(snap, label, (W//2 - 100, H//2),
+#                                 font, 2, color, 2)
+
+#                 frame = cv2.imencode(".jpg", snap)[1].tobytes()
+#                 yield (b'--frame\r\n'b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
+#                 time.sleep(0.01)
+
+#             else:
+#                 break
+#         print("off")
+
+
+class VideoStreaming(object):
+    def __init__(self):
+        self.video = cv2.VideoCapture(0)
+    def __del__(self):
+        self.video.release()
+    def get_frame(self):
+        ret, frame = self.video.read()
+        ret, jpg = cv2.imencode('.jpg', frame)
+        return jpg.tobytes()

+ 9 - 0
web/templates/index.html

@@ -0,0 +1,9 @@
+<html>
+  <head>
+    <title>Video Streaming Test</title>
+  </head>
+  <body>
+    <h1>Video Streaming Test</h1>
+    <img src="{{ url_for('video') }}">
+  </body>
+</html>

BIN
web/traced_model.pt


+ 2 - 0
yolov7-main/.gitignore

@@ -22,6 +22,8 @@
 *.cfg
 !setup.cfg
 !cfg/yolov3*.cfg
+*.mp4
+*.MP4
 
 storage.googleapis.com
 runs/*

File diffs are limited because there are too many changes
+ 8 - 53
yolov7-main/Demo.ipynb


+ 147 - 0
yolov7-main/app.py

@@ -0,0 +1,147 @@
+from importlib import import_module
+import os
+from flask import Flask, render_template, Response, request, send_file
+import cv2
+import subprocess
+import time
+import sys
+import logging
+
+sys.path.append('./')  # to run '$ python *.py' files in subdirectories
+logger = logging.getLogger(__name__)
+import torch
+from models.common import *
+from models.experimental import *
+from models.yolo import *
+from utils.autoanchor import check_anchor_order
+from utils.general import make_divisible, check_file, set_logging
+from utils.torch_utils import time_synchronized, fuse_conv_and_bn, model_info, scale_img, initialize_weights, \
+    select_device, copy_attr
+from utils.loss import SigmoidBin
+
+
+# import camera driver
+# from object_detection import VideoStreaming
+
+# if os.environ.get('CAMERA'):
+#     Camera = import_module('camera_' + os.environ['CAMERA']).Camera
+# else:
+#     from camera import Camera
+
+
+app = Flask(__name__)
+
+
+# def gen(camera):
+#     while True:
+#         frame = VideoStreaming.get_frame()
+#         # cv2.imencode('.jpg', frame)
+
+#         yield (b'--frame\r\n'
+#                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
+PATH_WEIGHT = './models/best.pt'
+img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')
+
+
+class Net():
+    def __init__(self, device='cuda'):
+        tstamp = time.time()
+        self.device = select_device(device)
+        print('[yolo] loading with', self.device)
+        self.net = Model('/root/helmet_det/yolov7-main/cfg/training/yolov7.yaml').to(self.device)
+        state_dict = torch.load(PATH_WEIGHT, map_location=self.device)['model'].state_dict()
+        self.net.load_state_dict(state_dict)
+        self.net.eval()
+        print('[yolo] finished loading (%.4f sec)' % (time.time() - tstamp))
+
+    def detect_faces(self, image, conf_th=0.8, scales=[1]):
+        w, h = image.shape[1], image.shape[0]
+        bboxes = np.empty(shape=(0, 5))
+
+        with torch.no_grad():
+            for s in scales:
+                scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
+
+                scaled_img = np.swapaxes(scaled_img, 1, 2)
+                scaled_img = np.swapaxes(scaled_img, 1, 0)
+                scaled_img = scaled_img[[2, 1, 0], :, :]
+                scaled_img = scaled_img.astype('float32')
+                scaled_img -= img_mean
+                scaled_img = scaled_img[[2, 1, 0], :, :]
+                x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
+                y = self.net(x)
+
+                detections = y.data
+                scale = torch.Tensor([w, h, w, h])
+
+                for i in range(detections.size(1)):
+                    j = 0
+                    while detections[0, i, j, 0] > conf_th:
+                        score = detections[0, i, j, 0]
+                        pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
+                        bbox = (pt[0], pt[1], pt[2], pt[3], score)
+                        bboxes = np.vstack((bboxes, bbox))
+                        j += 1
+
+            # keep = nms_(bboxes, 0.1) ## nms?
+            # bboxes = bboxes[keep]
+
+        return bboxes
+
+
+
+def get_stream_video():
+    # open the camera (RTSP stream)
+    cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
+
+    model = Net()    
+
+    while True:
+        # read one frame from the camera
+        success, frame = cam.read()
+        # print(frame)
+        # print(type(frame))
+        if not success:
+            break
+        else:
+            ret, buffer = cv2.imencode('.jpeg', frame)
+            # convert the frame to bytes, then yield it one chunk
+            # at a time in multipart (MJPEG) format
+
+            decode_img = cv2.imdecode(buffer, 1)
+            # NOTE: detect_faces returns a bbox array, so this yields raw
+            # bbox bytes rather than an encoded JPEG; to display results,
+            # draw the boxes on the frame and re-encode it instead
+            frame = model.detect_faces(decode_img).tobytes()
+            # frame = buffer.tobytes()
+            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' + bytearray(frame) + b'\r\n')
+
+
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+
+# serve the stream at the /video endpoint.
+@app.get("/video")
+def video():
+    # return a streaming Response whose body is the byte-encoded
+    # OpenCV frames, with the multipart mimetype specified
+    return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
+
+
+# open the IP camera
+@app.route("/stream", methods=['GET'])
+def stream():
+    print("here")
+    subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', 'best.pt'])
+    return "done"
+# rtsp://astrodom:hdci12@192.168.170.73:554/stream1
+
+
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', port=5000, debug=True)
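Note that `detect_faces` above uses S3FD-style mean subtraction and detection indexing rather than the YOLOv7 pipeline. For comparison, a minimal sketch of the standard YOLOv7 single-image flow; the weight and image paths are illustrative, and the helper imports assume this repo's `utils/` and `models/` packages are on the path:

```python
# Standard YOLOv7 inference flow (sketch): letterbox -> forward -> NMS.
import cv2
import numpy as np
import torch
from models.experimental import attempt_load
from utils.datasets import letterbox
from utils.general import non_max_suppression, scale_coords

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = attempt_load('best.pt', map_location=device)  # FP32, already in eval mode

im0 = cv2.imread('frame.jpg')  # BGR, HxWx3
img = letterbox(im0, 640, stride=int(model.stride.max()))[0]
img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR -> RGB, HWC -> CHW
img = np.ascontiguousarray(img)
x = torch.from_numpy(img).to(device).float() / 255.0  # 0..255 -> 0..1
x = x.unsqueeze(0)

with torch.no_grad():
    pred = model(x)[0]
pred = non_max_suppression(pred, conf_thres=0.25, iou_thres=0.45)[0]
if len(pred):
    # rescale boxes from the letterboxed size back to the original frame
    pred[:, :4] = scale_coords(x.shape[2:], pred[:, :4], im0.shape).round()
    print(pred)  # each row: x1, y1, x2, y2, conf, cls
```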

+ 76 - 0
yolov7-main/app_.py

@@ -0,0 +1,76 @@
+from importlib import import_module
+import os
+from flask import Flask, render_template, Response, request, send_file
+import cv2
+import subprocess
+# import camera driver
+# from object_detection import VideoStreaming
+from detect import frame_
+
+# if os.environ.get('CAMERA'):
+#     Camera = import_module('camera_' + os.environ['CAMERA']).Camera
+# else:
+#     from camera import Camera
+
+
+app = Flask(__name__)
+
+
+# def gen(camera):
+
+#     while True:
+#         frame = VideoStreaming.get_frame()
+#         # cv2.imencode('.jpg', frame)
+
+#         yield (b'--frame\r\n'
+#                b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
+
+
+def get_stream_video():
+    # open the camera (RTSP stream)
+    cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')
+
+    while True:
+        # read one frame from the camera
+        # f = frame_()
+        # print(f.inference())
+        
+        success, frame = cam.read()
+        # print(frame)
+        # print(type(frame))
+        if not success:
+            break
+        else:
+            ret, buffer = cv2.imencode('.jpeg', frame)
+            # convert the frame to bytes, then yield it one chunk
+            # at a time in multipart (MJPEG) format
+            frame = buffer.tobytes()
+            yield (b'--frame\r\n' b'Content-Type: image/jpeg\r\n\r\n' +
+               bytearray(frame) + b'\r\n')
+
+
+@app.route('/')
+def index():
+    return render_template('index.html')
+
+
+# serve the stream at the /video endpoint.
+@app.get("/video")
+def video():
+    # return a streaming Response whose body is the byte-encoded
+    # OpenCV frames, with the multipart mimetype specified
+    return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
+
+
+# open the IP camera
+@app.route("/stream", methods=['GET'])
+def stream():
+    print("here")
+    subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py', '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1', '--weights', 'best.pt'])
+    return "done"
+# rtsp://astrodom:hdci12@192.168.170.73:554/stream1
+
+
+
+if __name__ == '__main__':
+    app.run(host='0.0.0.0', debug=True)

+ 1 - 1
yolov7-main/cfg/baseline/yolor-csp-x.yaml

@@ -1,5 +1,5 @@
 # parameters
-nc: 80  # number of classes
+nc: 3  # number of classes
 depth_multiple: 1.33  # model depth multiple
 width_multiple: 1.25  # layer channel multiple
 

+ 35 - 3
yolov7-main/detect.py

@@ -14,6 +14,23 @@ from utils.general import check_img_size, check_requirements, check_imshow, non_
 from utils.plots import plot_one_box
 from utils.torch_utils import select_device, load_classifier, time_synchronized, TracedModel
 
+import matplotlib.pyplot as plt
+from time import sleep
+
+
+class frame_:
+    def __init__(self):
+        self.device = select_device(opt.device)
+        self.model = attempt_load(opt.weights, map_location=self.device).half()  # load FP32 model -> FP16 (assumes CUDA)
+        self.stride = int(self.model.stride.max())  # model stride
+        self.imgsz = check_img_size(opt.img_size, s=self.stride)  # check img_size
+        self.dataset = LoadStreams(opt.source, img_size=self.imgsz, stride=self.stride)
+
+    def inference(self):
+        # run the model on one batch of frames from the stream
+        for path, img, im0s, vid_cap in self.dataset:
+            img = torch.from_numpy(img).to(self.device).half() / 255.0  # uint8 -> fp16, 0..1
+            if img.ndimension() == 3:
+                img = img.unsqueeze(0)
+            return self.model(img)[0]
+
 
 def detect(save_img=False):
     source, weights, view_img, save_txt, imgsz, trace = opt.source, opt.weights, opt.view_img, opt.save_txt, opt.img_size, not opt.no_trace
@@ -51,6 +68,7 @@ def detect(save_img=False):
     vid_path, vid_writer = None, None
     if webcam:
         view_img = check_imshow()
+        print(f'view_img : {view_img}')
         cudnn.benchmark = True  # set True to speed up constant image size inference
         dataset = LoadStreams(source, img_size=imgsz, stride=stride)
     else:
@@ -126,7 +144,7 @@ def detect(save_img=False):
 
                     if save_img or view_img:  # Add bbox to image
                         label = f'{names[int(cls)]} {conf:.2f}'
-                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thinckness=1)
+                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=1)
 
             # Print time (inference + NMS)
             print(f'{s}Done. ({(1E3 * (t2 - t1)):.1f}ms) Inference, ({(1E3 * (t3 - t2)):.1f}ms) NMS')
@@ -134,6 +152,7 @@ def detect(save_img=False):
             # Stream results
             if view_img:
                 cv2.imshow(str(p), im0)
+                # plt.imshow(str(p), im0)
                 cv2.waitKey(1)  # 1 millisecond
 
             # Save results (image with detections)
@@ -165,7 +184,7 @@ def detect(save_img=False):
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--weights', nargs='+', type=str, default='Public/pretrained/yolov7_training.pt', help='model.pt path(s)')
+    parser.add_argument('--weights', nargs='+', type=str, default='best.pt', help='model.pt path(s)')
     parser.add_argument('--source', type=str, default='inference/images', help='source')  # file/folder, 0 for webcam
     parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
     parser.add_argument('--conf-thres', type=float, default=0.25, help='object confidence threshold')
@@ -183,8 +202,10 @@ if __name__ == '__main__':
     parser.add_argument('--name', default='exp', help='save results to project/name')
     parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
     parser.add_argument('--no-trace', action='store_true', help='don`t trace model')
+
+    parser.add_argument('--stream', action='store_true', help='serve detection results as a stream')
     opt = parser.parse_args()
-    print(opt)
+    # print(opt)
     #check_requirements(exclude=('pycocotools', 'thop'))
 
     with torch.no_grad():
@@ -194,3 +215,14 @@ if __name__ == '__main__':
                 strip_optimizer(opt.weights)
         else:
             detect()
+            # if opt.stream == True:
+            #     @app.get("/video")
+            #     def video():
+            #         # StringResponse함수를 return하고,
+            #         # 인자로 OpenCV에서 가져온 "바이트"이미지와 type을 명시
+            #         return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
+            # else: detect()
+

+ 56 - 0
yolov7-main/hi.ipynb

@@ -0,0 +1,56 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 23,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from models.yolo import *\n",
+    "PATH_WEIGHT = './models/best.pt'\n",
+    "PATH = '/root/Public/pretrained/best.pt'\n",
+    "\n",
+    "net = Model('/root/helmet_det/yolov7-main/cfg/training/yolov7.yaml').to('cuda')\n",
+    "state_dict = torch.load(PATH, map_location='cuda')['model'].state_dict()\n",
+    "net.load_state_dict(state_dict, strict=False)\n",
+    "\n",
+    "a = torch.load(PATH_WEIGHT, map_location='cuda')['model']\n",
+    "#print(a)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3.8.13 ('base')",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.13"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "d4d1e4263499bec80672ea0156c357c1ee493ec2b1c70f0acce89fc37c4a6abe"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}

+ 6 - 6
yolov7-main/models/experimental.py

@@ -241,17 +241,18 @@ class End2End(nn.Module):
         return x
 
 
-
-
-
 def attempt_load(weights, map_location=None):
     # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a
     model = Ensemble()
     for w in weights if isinstance(weights, list) else [weights]:
-        attempt_download(w)
+        # attempt_download(w)
         ckpt = torch.load(w, map_location=map_location)  # load
         model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval())  # FP32 model
-    
+
     # Compatibility updates
     for m in model.modules():
         if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:
@@ -260,7 +261,6 @@ def attempt_load(weights, map_location=None):
             m.recompute_scale_factor = None  # torch 1.11.0 compatibility
         elif type(m) is Conv:
             m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
-    
     if len(model) == 1:
         return model[-1]  # return model
     else:

+ 3 - 3
yolov7-main/requirements.txt

@@ -3,8 +3,8 @@
 # Base ----------------------------------------
 matplotlib>=3.2.2
 numpy>=1.18.5
-opencv-python>=4.1.1
-opencv-python-headless<4.3
+opencv-python==4.5.5.62
+opencv-python-headless<4.2
 Pillow>=7.1.2
 PyYAML>=5.3.1
 requests>=2.23.0
@@ -15,7 +15,7 @@ tqdm>=4.41.0
 protobuf<4.21.3
 
 # Logging -------------------------------------
-tensorboard>=2.4.1
+# tensorboard>=2.4.1
 # wandb
 
 # Plotting ------------------------------------
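Since the README adds `libgtk2.0-dev` for OpenCV's GUI (`cv2.imshow`), a hedged check (not part of the repo) that the installed OpenCV build actually has GUI support:

```python
# Print the GUI section of OpenCV's build info; it should mention GTK
# when the build found libgtk2.0-dev.
import cv2

info = cv2.getBuildInformation()
start = info.find('GUI')
print(info[start:start + 200] if start != -1 else 'no GUI section found')
```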

+ 9 - 0
yolov7-main/templates/index.html

@@ -0,0 +1,9 @@
+<html>
+  <head>
+    <title>Video Streaming Test</title>
+  </head>
+  <body>
+    <h1>Video Streaming Test</h1>
+    <img src="{{ url_for('video') }}">
+  </body>
+</html>

+ 14 - 0
yolov7-main/tracking.py

@@ -0,0 +1,14 @@
+from detection_helpers import *
+from tracking_helpers import *
+from bridge_wrapper import *
+from PIL import Image
+
+detector = Detector(classes = [0,1,2]) # detect ONLY [helmet, person, head]; classes = None means detect all classes. Class list: "data/coco.yaml"
+detector.load_model('/root/helmet_det/yolov7-main/models/best.pt', img_size=2016) # pass the path to the trained weight file
+
+# Initialise the wrapper class that binds the detector and tracker together
+tracker = YOLOv7_DeepSORT(reID_model_path="./deep_sort/model_weights/mars-small128.pb", detector=detector)
+
+# output = None will not save the output video
+FILE = 'SD_Gate_Cam_1'
+tracker.track_video(f"./IO_data/input/video/{FILE}.MP4", output=f"./IO_data/output/SD_Gate_Cam_2.mp4", show_live = False, skip_frames = None, count_objects = True, verbose=1)

+ 1 - 0
yolov7-main/train.py

@@ -459,6 +459,7 @@ def train(hyp, opt, device, tb_writer=None):
                         'best_fitness': best_fitness,
                         'training_results': results_file.read_text(),
                         'model': deepcopy(model.module if is_parallel(model) else model).half(),
+                        # 'state_dict': model.state_dict(),
                         'ema': deepcopy(ema.ema).half(),
                         'updates': ema.updates,
                         'optimizer': optimizer.state_dict(),

+ 1 - 1
yolov7-main/utils/torch_utils.py

@@ -67,7 +67,7 @@ def select_device(device='', batch_size=None):
     if cpu:
         os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force torch.cuda.is_available() = False
     elif device:  # non-cpu device requested
-        os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
+        # os.environ['CUDA_VISIBLE_DEVICES'] = device  # set environment variable
         assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested'  # check availability
 
     cuda = not cpu and torch.cuda.is_available()

Some files were not shown because too many files changed in this diff