import os, glob, pickle, argparse
import cv2
import numpy
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
from flask import Flask, request
from utils import *
from EmbedNet import *
from detectors import S3FD
def createParser():

    parser = argparse.ArgumentParser(description="FaceNet")
    parser.add_argument('--config', type=str, default=None, help='Config YAML file')

    ## Data loader
    parser.add_argument('--batch_size', type=int, default=200, help='Batch size, number of classes per batch')
    parser.add_argument('--max_img_per_cls', type=int, default=500, help='Maximum number of images per class per epoch')
    parser.add_argument('--nDataLoaderThread', type=int, default=5, help='Number of loader threads')

    ## Training details
    parser.add_argument('--test_interval', type=int, default=5, help='Test and save every [test_interval] epochs')
    parser.add_argument('--max_epoch', type=int, default=100, help='Maximum number of epochs')
    parser.add_argument('--trainfunc', type=str, default="softmax", help='Loss function')

    ## Optimizer
    parser.add_argument('--optimizer', type=str, default="adam", help='sgd or adam')
    parser.add_argument('--scheduler', type=str, default="steplr", help='Learning rate scheduler')
    parser.add_argument('--lr', type=float, default=0.001, help='Learning rate')
    parser.add_argument('--lr_decay', type=float, default=0.90, help='Learning rate decay every [test_interval] epochs')
    parser.add_argument('--weight_decay', type=float, default=0, help='Weight decay in the optimizer')

    ## Loss functions
    parser.add_argument('--hard_prob', type=float, default=0.5, help='Hard negative mining probability, otherwise random, only for some loss functions')
    parser.add_argument('--hard_rank', type=int, default=10, help='Hard negative mining rank in the batch, only for some loss functions')
    parser.add_argument('--margin', type=float, default=0.1, help='Loss margin, only for some loss functions')
    parser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions')
    parser.add_argument('--nPerClass', type=int, default=1, help='Number of images per class per batch, only for metric learning based losses')
    parser.add_argument('--nClasses', type=int, default=8700, help='Number of classes in the softmax layer, only for softmax-based losses')

    ## Load and save
    parser.add_argument('--initial_model', type=str, default="./models/amsoft_model.model", help='Initial model weights')
    parser.add_argument('--save_path', type=str, default="exps/exp1", help='Path for model and logs')

    ## Training and test data
    parser.add_argument('--train_path', type=str, default="data/vggface2", help='Absolute path to the train set')
    parser.add_argument('--train_ext', type=str, default="jpg", help='Training files extension')
    parser.add_argument('--test_path', type=str, default="data/test", help='Absolute path to the test set')
    parser.add_argument('--test_list', type=str, default="data/test_list.csv", help='Evaluation list')

    ## Model definition
    parser.add_argument('--model', type=str, default="ResNet18", help='Name of model definition')
    parser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer')

    ## For test only
    parser.add_argument('--eval', dest='eval', action='store_true', help='Eval only')

    ## For server
    parser.add_argument('--server', dest='server', action='store_true', help='Server mode')
    parser.add_argument('--feat_save_path', type=str, default='saved_feats', help='Absolute path to the feature')
    parser.add_argument('--port', type=int, default=10000, help='Port for the server')

    ## Distributed and mixed precision training
    parser.add_argument('--mixedprec', dest='mixedprec', action='store_true', help='Enable mixed precision training')

    args = parser.parse_args()

    return args
def loadParameters(model, path):
    ## Copy loaded weights into the model, skipping unknown or mismatched parameters
    state = model.state_dict()
    loaded_state = torch.load(path)

    for name, param in loaded_state.items():
        if name not in state:
            print("%s is not in the model." % name)
            continue

        if state[name].size() != param.size():
            print("Wrong parameter length: %s, model: %s, loaded: %s" % (name, state[name].size(), param.size()))
            continue

        state[name].copy_(param)
## Face detector, Flask app, and embedding model
DET = S3FD(device='cuda')

app = Flask(__name__)

args = createParser()

UNKNOWN_THRESHOLD = 0.5

s = EmbedNet(**vars(args)).cuda()

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Resize(256),
    transforms.CenterCrop([224, 224]),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

# trainer = ModelTrainer(s, **vars(args))
loadParameters(s, args.initial_model)
s.eval()
@app.route('/query', methods=['POST'])
def query():

    # unpack the received data
    data = pickle.loads(request.get_data())
    image = data['img']

    # detect faces; exactly one face is expected in the query image
    image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    bboxes = DET.detect_faces(image_np, conf_th=0.9, scales=[0.5])

    if len(bboxes) != 1:
        return "fail"

    # crop a square region around the detected face, with padding
    bsi = 100
    sx = int((bboxes[0][0] + bboxes[0][2]) / 2) + bsi
    sy = int((bboxes[0][1] + bboxes[0][3]) / 2) + bsi
    ss = int(max((bboxes[0][3] - bboxes[0][1]), (bboxes[0][2] - bboxes[0][0])) / 2)

    image = numpy.pad(image, ((bsi, bsi), (bsi, bsi), (0, 0)), 'constant', constant_values=(110, 110))
    face = image[int(sy - ss):int(sy + ss), int(sx - ss):int(sx + ss)]
    face = cv2.resize(face, (240, 240))

    # embed the query face
    im1 = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
    inp1 = transform(im1).cuda()
    com_feat = s(inp1).detach().cpu()

    # compare against all enrolled features
    files = glob.glob(os.path.join(args.feat_save_path, '*.pt'))

    max_score = 0
    pname = 'none'

    for file in files:
        ref_feat = torch.load(file)
        score = F.cosine_similarity(ref_feat, com_feat)

        if score > max_score:
            max_score = score.item()
            pname = os.path.splitext(os.path.basename(file))[0]

        print('{} {:.2f}'.format(file, score.item()))

    # below the threshold, report the face as unknown
    if max_score < UNKNOWN_THRESHOLD:
        max_score = 0
        pname = "Unknown"

    # cast coordinates to plain floats so the response dict is JSON-serialisable
    return {
        "file": pname,
        "score": max_score,
        "x1": float(bboxes[0][0]),
        "y1": float(bboxes[0][1]),
        "x2": float(bboxes[0][2]),
        "y2": float(bboxes[0][3]),
    }
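
# --- Example client call (illustrative sketch, not part of the original code) ---
# The /query and /enroll routes unpack a pickled dict from the request body, so a
# client can post one containing an OpenCV BGR image. The URL, port and image path
# below are assumptions for demonstration only; /enroll additionally expects a
# 'name' key in the dict.
#
#   import pickle, requests, cv2
#   frame = cv2.imread('sample.jpg')  # BGR image, as the server expects
#   resp = requests.post('http://localhost:10000/query', data=pickle.dumps({'img': frame}))
#   print(resp.json())  # e.g. {"file": ..., "score": ..., "x1": ..., ...}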
@app.route('/enroll', methods=['POST'])
def enroll():

    # unpack the received data
    data = pickle.loads(request.get_data())
    iname = data['name']
    image = data['img']

    # detect the face to enroll
    image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    bboxes = DET.detect_faces(image_np, conf_th=0.9, scales=[0.5])

    # crop a square region around the detected face, with padding
    bsi = 100
    sx = int((bboxes[0][0] + bboxes[0][2]) / 2) + bsi
    sy = int((bboxes[0][1] + bboxes[0][3]) / 2) + bsi
    ss = int(max((bboxes[0][3] - bboxes[0][1]), (bboxes[0][2] - bboxes[0][0])) / 2)

    image = numpy.pad(image, ((bsi, bsi), (bsi, bsi), (0, 0)), 'constant', constant_values=(110, 110))
    face = image[int(sy - ss):int(sy + ss), int(sx - ss):int(sx + ss)]
    face = cv2.resize(face, (240, 240))

    # TO-DO / 2022-08-25
    # 0. Client requirement: during enrollment the client keeps sending images until it stops;
    #    the enrollment name is assumed to be unique (no duplicates).
    # 1. Save the images into a per-person folder.
    # 2. Avoid saving duplicate images.
    # 3. Extract one feature per person as the centroid of the features of that person's images
    #    (see the sketch below this function).

    # save the cropped face and its embedding under the enrollment name
    if not os.path.exists(args.feat_save_path):
        os.makedirs(args.feat_save_path)

    cv2.imwrite(os.path.join(args.feat_save_path, '{}.jpg'.format(iname)), face)

    im1 = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
    inp1 = transform(im1).cuda()
    ref_feat = s(inp1).detach().cpu()

    torch.save(ref_feat, os.path.join(args.feat_save_path, '{}.pt'.format(iname)))

    return "success"
- if __name__ == "__main__":
- app.run(host='0.0.0.0', debug=True, port=args.port, threaded=False)
|