import logging
import subprocess
import sys
import time

import cv2
import numpy as np
from flask import Flask, render_template, Response

sys.path.append('./')  # to run '$ python *.py' files in subdirectories
logger = logging.getLogger(__name__)

import torch
from models.experimental import attempt_load
from utils.general import non_max_suppression, scale_coords
from utils.torch_utils import select_device


app = Flask(__name__)


PATH_WEIGHT = './models/best.pt'
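# Caffe-style BGR channel means, apparently left over from an earlier
# face-detector preprocessing step; unused by the YOLO pipeline below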
img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')


class Net:
    def __init__(self, device=''):
        tstamp = time.time()
        self.device = select_device(device)  # '' auto-selects CUDA when available
        print('[yolo] loading with', self.device)
        self.net = attempt_load(PATH_WEIGHT, map_location=self.device)
        self.net.eval()
        print('[yolo] finished loading (%.4f sec)' % (time.time() - tstamp))

    def detect_faces(self, image, conf_th=0.8, scales=[1]):
        # image: RGB ndarray of shape (H, W, C) whose sides are multiples of
        # the model stride (32); returns an ndarray with one
        # (x1, y1, x2, y2, score) row per detection
        bboxes = np.empty(shape=(0, 5))

        with torch.no_grad():
            for s in scales:
                scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
                scaled_img = scaled_img.transpose(2, 0, 1).astype('float32')  # HWC -> CHW
                scaled_img /= 255.0  # the model expects pixel values in [0, 1]
                x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
                y = self.net(x)[0]
                y = non_max_suppression(y, conf_thres=conf_th)

                # Process detections for each image in the batch
                for det in y:
                    if len(det):
                        # Rescale boxes from the network input size back to the original image
                        det[:, :4] = scale_coords(x.shape[2:], det[:, :4], image.shape).round()
                        # det rows are (x1, y1, x2, y2, conf, cls); keep xyxy + conf
                        bboxes = np.vstack((bboxes, det[:, :5].cpu().numpy()))

        return bboxes
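
# A minimal usage sketch for Net (the image path is hypothetical):
#
#   net = Net()
#   img = cv2.cvtColor(cv2.imread('sample.jpg'), cv2.COLOR_BGR2RGB)
#   img = cv2.resize(img, (640, 384))  # sides must be multiples of the stride (32)
#   boxes = net.detect_faces(img)      # rows of (x1, y1, x2, y2, score)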


def get_stream_video():
    # Open the IP camera
    cam = cv2.VideoCapture('rtsp://astrodom:hdci12@192.168.170.73:554/stream1')

    model = Net()
    while True:
        # Grab a frame from the camera
        success, frame = cam.read()
        if not success:
            break

        # Detection runs on an RGB copy resized to the network input size
        image_np = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image_np = cv2.resize(image_np, (640, 384))
        bboxes = model.detect_faces(image_np)

        # Draw the boxes on the (BGR) display frame, JPEG-encode it, and
        # yield it as one part of the multipart stream
        display = cv2.resize(frame, (640, 384))
        for x1, y1, x2, y2, score in bboxes:
            cv2.rectangle(display, (int(x1), int(y1)), (int(x2), int(y2)), (0, 255, 0), 2)
        ret, buffer = cv2.imencode('.jpg', display)
        if not ret:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
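
# A quick standalone check of the generator (assumes the RTSP camera above is
# reachable; writes the first streamed JPEG to disk):
#
#   part = next(get_stream_video())
#   with open('first_frame.jpg', 'wb') as f:
#       f.write(part.split(b'\r\n\r\n', 1)[1].rstrip(b'\r\n'))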


@app.route('/')
def index():
    return render_template('index.html')


# Serve the MJPEG stream at the /video route
@app.route("/video", methods=['GET'])
def video():
    # Return a streaming Response; the generator yields the JPEG "byte"
    # images from OpenCV, with the multipart mimetype declared
    return Response(get_stream_video(), mimetype="multipart/x-mixed-replace; boundary=frame")
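
# The multipart/x-mixed-replace response can be embedded directly in a page;
# the (assumed) templates/index.html only needs something like:
#
#   <img src="/video">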


# Open the IP camera and run the standalone detector script; note that this
# blocks until detect.py exits
@app.route("/stream", methods=['GET'])
def stream():
    result = subprocess.run(['python3', '/root/helmet_det/yolov7-main/detect.py',
                             '--source', 'rtsp://astrodom:hdci12@192.168.170.73:554/stream1',
                             '--weights', '/root/helmet_det/yolov7-main/models/best.pt'])
    # subprocess.run returns a CompletedProcess, which is not a valid Flask
    # response; report the exit code as plain text instead
    return f'detect.py exited with code {result.returncode}'



if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000, debug=True)