
build: Save multiple pictures and calculate centroid

soohyunkim committed 2 years ago
commit 3ece00984b
2 changed files with 85 additions and 19 deletions
  1. client.py  +5 -4
  2. server.py  +80 -15

client.py  +5 -4

@@ -3,7 +3,7 @@ import requests
 import pickle
 import json
 import sys
-
+import time
 
 HOST = 'http://192.168.100.92:10016'
 url = HOST + '/query'
@@ -31,9 +31,10 @@ while True:
                 img = image.copy()
                 res = requests.post(HOST + '/enroll', pickle.dumps({'img': image, 'name': iname}))
                 print('enroll', res.text)
-                if not res.text == 'fail':
-                    cv2.destroyWindow("GFG")
-                    break
+            if cv2.waitKey(5) & 0xFF == ord('q'):
+                cv2.destroyWindow("GFG")
+                requests.post(HOST + '/cal', pickle.dumps({'name': iname}))
+                break
     else:
         print('※ Query mode start, Exit key is "q" ※')
         while True:

server.py  +80 -15

@@ -1,5 +1,5 @@
 
-import os
+import os, datetime, numpy as np
 from utils import *
 from EmbedNet import *
 import torchvision.transforms as transforms
@@ -56,6 +56,7 @@ def createParser():
     ## For server
     parser.add_argument('--server',             dest='server',  action='store_true',    help='Server mode')
     parser.add_argument('--feat_save_path',     type=str,       default='saved_feats',  help='Absolute path to the feature')
+    parser.add_argument('--img_save_path',      type=str,       default='saved_img',    help='Absolute path to the saved images')
     parser.add_argument('--port',               type=int,       default=10000,          help='Port for the server')
 
     ## Distributed and mixed precision training
@@ -82,6 +83,30 @@ def loadParameters(model, path):
 
         state[name].copy_(param);
 
+class your_dataset(torch.utils.data.Dataset):
+    def __init__(self, files):
+
+        self.data   = files
+
+        print('{:d} files in the dataset'.format(len(self.data)))
+
+    def __getitem__(self, index):
+
+        fname = self.data[index]
+
+        try:
+            # return the image (BGR and RGB copies) if the read is successful
+            image = cv2.imread(fname)
+            image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            return image, image_np, fname
+        except:
+            # return empty arrays if the read fails
+            return np.array([]), np.array([]), fname
+
+    def __len__(self):
+        return len(self.data)
+
+
 DET = S3FD(device='cuda')
 app = Flask(__name__)
 args = createParser()
@@ -101,6 +126,51 @@ loadParameters(s, args.initial_model)
 s.eval()
 
 
+@app.route('/cal', methods=['POST'])
+def calculate():
+    # unpack the received data
+    data = pickle.loads(request.get_data())
+
+    iname = data['name']
+
+    image_save_path = os.path.join(args.img_save_path, iname)
+
+    files = glob.glob(os.path.join(image_save_path, '{}*.jpg'.format(iname)))
+
+    dataset = your_dataset(files)
+    loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, num_workers=10)
+
+    embedding_list = None
+    for batch in loader:
+        image     = batch[0][0].numpy()
+        image_np  = batch[1][0].numpy()
+        bboxes = DET.detect_faces(image_np, conf_th=0.9, scales=[0.5])
+
+        bsi = 100
+
+        sx = int((bboxes[0][0]+bboxes[0][2])/2) + bsi
+        sy = int((bboxes[0][1]+bboxes[0][3])/2) + bsi
+        ss = int(max((bboxes[0][3]-bboxes[0][1]),(bboxes[0][2]-bboxes[0][0]))/2)
+
+        image = numpy.pad(image,((bsi,bsi),(bsi,bsi),(0,0)), 'constant', constant_values=(110,110))
+
+        face = image[int(sy-ss):int(sy+ss),int(sx-ss):int(sx+ss)]
+        face = cv2.resize(face,(240,240))
+        
+        im1 = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
+
+        inp1 = transform(im1).cuda()
+
+        ref_feat = s(inp1).detach().cpu()
+
+        embedding_list = torch.cat([ref_feat, embedding_list]) if embedding_list is not None else ref_feat
+    
+    embedding_mean = torch.mean(embedding_list, dim=0, keepdim=True)
+    torch.save(embedding_mean, os.path.join(args.feat_save_path, '{}.pt'.format(iname)))
+    
+    return 'success'
+
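
At its core, the /cal route above reduces one embedding per stored image to their mean. A standalone sketch of that reduction, assuming a list of (1, D) feature tensors (the helper name is illustrative):

import torch

def centroid(feats):
    # feats: list of (1, D) embeddings, one per enrolled image
    stacked = torch.cat(feats, dim=0)                 # (N, D)
    return torch.mean(stacked, dim=0, keepdim=True)   # (1, D) mean feature, saved as <name>.pt
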
 @app.route('/query', methods=['POST'])
 def query():
     # unpack the received data
@@ -138,7 +208,6 @@ def query():
     for file in files:
 
         ref_feat = torch.load(file)
-
         score = F.cosine_similarity(ref_feat, com_feat)
         if(score>max_score) :
             max_score = score.item()
@@ -146,7 +215,7 @@ def query():
 
         print('{} {:.2f}'.format(file,score.item()))
 
-    if max_score < UNKNOWN_THRESHOLD:
+    if max_score < 0.1:
         max_score = 0
         pname = "Unknown"
             
@@ -171,6 +240,9 @@ def enroll():
 
     bboxes = DET.detect_faces(image_np, conf_th=0.9, scales=[0.5])
 
+    if len(bboxes) != 1:
+        return "fail"
+
     bsi = 100
 
     sx = int((bboxes[0][0]+bboxes[0][2])/2) + bsi
@@ -182,23 +254,16 @@ def enroll():
     face = image[int(sy-ss):int(sy+ss),int(sx-ss):int(sx+ss)]
     face = cv2.resize(face,(240,240))
 
-    # TO-DO / 2022-08-25
-    # 0. Client requirement: during enroll, keep sending pictures until exit; assume the input value (Name) is unique with no duplicates
-    # 1. Save images in a per-person folder
-    # 2. Prevent duplicate filenames when saving images
-    # 3. Extract a feature from each person's folder of pictures via the centroid
+    now = datetime.datetime.now().strftime('%y-%m-%d-%H-%M-%f')
+    image_save_path = os.path.join(args.img_save_path, iname)
 
     if not(os.path.exists(args.feat_save_path)):
         os.makedirs(args.feat_save_path)
-            
-    cv2.imwrite(os.path.join(args.feat_save_path, '{}.jpg'.format(iname)),face)
-    im1 = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
-
-    inp1 = transform(im1).cuda()
 
-    ref_feat = s(inp1).detach().cpu()
+    if not(os.path.exists(image_save_path)):
+        os.makedirs(image_save_path)
 
-    torch.save(ref_feat, os.path.join(args.feat_save_path,'{}.pt'.format(iname)))
+    cv2.imwrite(os.path.join(image_save_path, '{}_{}.jpg'.format(iname, now)), face)
 
     return "success"