Browse source

First commit

soohyunkim 2 years ago
commit
15ce733d9d

+ 3 - 0
.gitignore

@@ -0,0 +1,3 @@
+saved_feats/*
+data/*
+__pycache__

+ 141 - 0
DatasetLoader.py

@@ -0,0 +1,141 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+import numpy
+import random
+import pdb
+import glob
+import os
+from torch.utils.data import Dataset, DataLoader
+from PIL import Image
+
+def round_down(num, divisor):
+    return num - (num%divisor)
+
+def worker_init_fn(worker_id):
+    numpy.random.seed(numpy.random.get_state()[1][0] + worker_id)
+
+class meta_loader(Dataset):
+    def __init__(self, train_path, train_ext, transform):
+        
+        ## Read Training Files
+        files = glob.glob('%s/*/*.%s'%(train_path,train_ext))
+
+        ## Make a mapping from Class Name to Class Number
+        dictkeys = list(set([x.split('/')[-2] for x in files]))
+        dictkeys.sort()
+        dictkeys = { key : ii for ii, key in enumerate(dictkeys) }
+
+        self.transform  = transform
+
+        self.label_dict = {}
+        self.data_list  = []
+        self.data_label = []
+        
+        for lidx, file in enumerate(files):
+            speaker_name = file.split('/')[-2]
+            speaker_label = dictkeys[speaker_name];
+
+            if not (speaker_label in self.label_dict):
+                self.label_dict[speaker_label] = [];
+
+            self.label_dict[speaker_label].append(lidx);
+            
+            self.data_label.append(speaker_label)
+            self.data_list.append(file)
+
+        print('%d files from %d classes found.'%(len(self.data_list),len(self.label_dict)))
+
+    def __getitem__(self, indices):
+
+        ## All indices in one batch element come from the same class
+        feat = []
+        for index in indices:
+            feat.append(self.transform(Image.open(self.data_list[index])))
+        feat = numpy.stack(feat, axis=0)
+
+        return torch.FloatTensor(feat), self.data_label[indices[0]]
+
+    def __len__(self):
+
+        return len(self.data_list)
+
+class test_dataset_loader(Dataset):
+    def __init__(self, test_list, test_path, transform, **kwargs):
+        self.test_path  = test_path
+        self.data_list  = test_list
+        self.transform  = transform
+
+    def __getitem__(self, index):
+        img = Image.open(os.path.join(self.test_path, self.data_list[index]))
+        return self.transform(img), self.data_list[index]
+
+    def __len__(self):
+        return len(self.data_list)
+
+
+class meta_sampler(torch.utils.data.Sampler):
+    def __init__(self, data_source, nPerClass, max_img_per_cls, batch_size):
+
+        self.data_source        = data_source
+        self.label_dict         = data_source.label_dict
+        self.nPerClass          = nPerClass
+        self.max_img_per_cls    = max_img_per_cls;
+        self.batch_size         = batch_size;
+        
+    def __iter__(self):
+        
+        ## Get a list of identities
+        dictkeys = list(self.label_dict.keys());
+        dictkeys.sort()
+
+        lol = lambda lst, sz: [lst[i:i+sz] for i in range(0, len(lst), sz)]
+
+        flattened_list = []
+        flattened_label = []
+
+        ## Data for each class
+        for findex, key in enumerate(dictkeys):
+            data    = self.label_dict[key]
+            numSeg  = round_down(min(len(data),self.max_img_per_cls),self.nPerClass)
+            
+            rp      = lol(numpy.random.permutation(len(data))[:numSeg],self.nPerClass)
+            flattened_label.extend([findex] * (len(rp)))
+            for indices in rp:
+                flattened_list.append([data[i] for i in indices])
+
+        ## Data in random order
+        mixid           = numpy.random.permutation(len(flattened_label))
+        mixlabel        = []
+        mixmap          = []
+
+        ## Prevent two groups of the same class appearing in the same batch
+        for ii in mixid:
+            startbatch = len(mixlabel) - len(mixlabel) % self.batch_size
+            if flattened_label[ii] not in mixlabel[startbatch:]:
+                mixlabel.append(flattened_label[ii])
+                mixmap.append(ii)
+        
+        return iter([flattened_list[i] for i in mixmap])
+    
+    def __len__(self):
+        return len(self.data_source)
+
+def get_data_loader(batch_size, max_img_per_cls, nDataLoaderThread, nPerClass, train_path, train_ext, transform, **kwargs):
+    
+    train_dataset = meta_loader(train_path, train_ext, transform)
+
+    train_sampler = meta_sampler(train_dataset, nPerClass, max_img_per_cls, batch_size)
+
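+    ## Each element yielded by the sampler is a list of nPerClass indices from a
+    ## single class, so every batch item is a stack of nPerClass images.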
+    train_loader = torch.utils.data.DataLoader(
+        train_dataset,
+        batch_size=batch_size,
+        num_workers=nDataLoaderThread,
+        sampler=train_sampler,
+        pin_memory=False,
+        worker_init_fn=worker_init_fn,
+        drop_last=True,
+    )
+    
+    return train_loader
+
+

+ 222 - 0
EmbedNet.py

@@ -0,0 +1,222 @@
+#!/usr/bin/python
+#-*- coding: utf-8 -*-
+
+import torch, pickle
+import torch.nn as nn
+import torch.nn.functional as F
+import numpy, math, pdb, sys
+import time, importlib
+from DatasetLoader import test_dataset_loader
+from torch.cuda.amp import autocast, GradScaler
+import cv2
+import glob
+from PIL import Image
+
+from flask import Flask, request
+
+class EmbedNet(nn.Module):
+
+    def __init__(self, model, optimizer, trainfunc, nPerClass, **kwargs):
+        super(EmbedNet, self).__init__();
+
+        ## __S__ is the embedding model
+        EmbedNetModel = importlib.import_module('models.'+model).__getattribute__('MainModel')
+        self.__S__ = EmbedNetModel(**kwargs);
+
+        ## __L__ is the classifier plus the loss function
+        LossFunction = importlib.import_module('loss.'+trainfunc).__getattribute__('LossFunction')
+        self.__L__ = LossFunction(**kwargs);
+
+        ## Number of examples per identity per batch
+        self.nPerClass = nPerClass
+
+    def forward(self, data, label=None):
+
+        data    = data.reshape(-1,data.size()[-3],data.size()[-2],data.size()[-1])
+        outp    = self.__S__.forward(data)
+
+        if label is None:
+            return outp
+
+        else:
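+            ## Regroup embeddings as (batch, nPerClass, dim); squeezed when nPerClass == 1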
+            outp    = outp.reshape(self.nPerClass,-1,outp.size()[-1]).transpose(1,0).squeeze(1)
+            nloss, prec1 = self.__L__.forward(outp,label)
+            return nloss, prec1
+
+
+class ModelTrainer(object):
+
+    def __init__(self, embed_model, optimizer, scheduler, mixedprec, **kwargs):
+
+        self.__model__  = embed_model
+
+        ## Optimizer (e.g. Adam or SGD)
+        Optimizer = importlib.import_module('optimizer.'+optimizer).__getattribute__('Optimizer')
+        self.__optimizer__ = Optimizer(self.__model__.parameters(), **kwargs)
+
+        ## Learning rate scheduler
+        Scheduler = importlib.import_module('scheduler.'+scheduler).__getattribute__('Scheduler')
+        self.__scheduler__, self.lr_step = Scheduler(self.__optimizer__, **kwargs)
+
+        ## For mixed precision training
+        self.scaler = GradScaler() 
+        self.mixedprec = mixedprec
+
+        assert self.lr_step in ['epoch', 'iteration']
+
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+    ## Train network
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+
+    def train_network(self, loader, verbose):
+
+        self.__model__.train();
+
+        stepsize = loader.batch_size;
+
+        counter = 0;
+        index   = 0;
+        loss    = 0;
+        top1    = 0     # EER or accuracy
+
+        tstart = time.time()
+        
+        for data, label in loader:
+
+            data    = data.transpose(1,0)
+
+            ## Reset gradients
+            self.__model__.zero_grad();
+
+            ## Forward and backward passes
+            if self.mixedprec:
+                with autocast():
+                    nloss, prec1 = self.__model__(data.cuda(), label.cuda())
+                self.scaler.scale(nloss).backward();
+                self.scaler.step(self.__optimizer__);
+                self.scaler.update();       
+            else:
+                nloss, prec1 = self.__model__(data.cuda(), label.cuda())
+                nloss.backward();
+                self.__optimizer__.step();
+
+            loss    += nloss.detach().cpu();
+            top1    += prec1.detach().cpu();
+            counter += 1;
+            index   += stepsize;
+
+            telapsed = time.time() - tstart
+            tstart = time.time()
+
+            if verbose:
+                sys.stdout.write("\rProcessing (%d) "%(index));
+                sys.stdout.write("Loss %f TEER/TAcc %2.3f%% - %.2f Hz "%(loss/counter, top1/counter, stepsize/telapsed));
+                sys.stdout.flush();
+
+            if self.lr_step == 'iteration': self.__scheduler__.step()
+
+        if self.lr_step == 'epoch': self.__scheduler__.step()
+
+        sys.stdout.write("\n");
+        
+        return (loss/counter, top1/counter);
+
+
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+    ## Evaluate from list
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+
+    def evaluateFromList(self, test_list, test_path, nDataLoaderThread, transform, print_interval=100, num_eval=10, **kwargs):
+        
+        self.__model__.eval();
+        
+        feats       = {}
+        tstart      = time.time()
+
+        ## Read all lines
+        with open(test_list) as f:
+            lines = f.readlines()
+
+        ## Get a list of unique file names
+        files = sum([x.strip().split(',')[-2:] for x in lines],[])
+        setfiles = list(set(files))
+        setfiles.sort()
+
+        ## Define test data loader
+        test_dataset = test_dataset_loader(setfiles, test_path, transform=transform, num_eval=num_eval, **kwargs)
+        test_loader = torch.utils.data.DataLoader(
+            test_dataset,
+            batch_size=1,
+            shuffle=False,
+            num_workers=nDataLoaderThread,
+            drop_last=False,
+        )
+
+        ## Extract features for every image
+        for idx, data in enumerate(test_loader):
+            inp1                = data[0][0].cuda()
+            ref_feat            = self.__model__(inp1).detach().cpu()
+            feats[data[1][0]]   = ref_feat
+            telapsed            = time.time() - tstart
+
+            if idx % print_interval == 0:
+                sys.stdout.write("\rReading %d of %d: %.2f Hz, embedding size %d"%(idx,len(setfiles),idx/telapsed,ref_feat.size()[1]));
+
+        print('')
+        all_scores = [];
+        all_labels = [];
+        tstart = time.time()
+
+        ## Read files and compute all scores
+        for idx, line in enumerate(lines):
+
+            data = line.strip().split(',');
+
+            ref_feat = feats[data[1]]
+            com_feat = feats[data[2]]
+
+            score = F.cosine_similarity(ref_feat, com_feat)
+
+            all_scores.append(score);  
+            all_labels.append(int(data[0]));
+
+            if idx % print_interval == 0:
+                telapsed = time.time() - tstart
+                sys.stdout.write("\rComputing %d of %d: %.2f Hz"%(idx,len(lines),idx/telapsed));
+                sys.stdout.flush();
+
+        print('')
+
+        return (all_scores, all_labels);
+
+
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+    ## Save parameters
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+
+    def saveParameters(self, path):
+        
+        torch.save(self.__model__.state_dict(), path);
+
+
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+    ## Load parameters
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+
+    def loadParameters(self, path):
+
+        self_state = self.__model__.state_dict();
+        loaded_state = torch.load(path);
+        for name, param in loaded_state.items():
+            origname = name;
+            if name not in self_state:
+                print("%s is not in the model."%origname);
+                continue;
+
+            if self_state[name].size() != loaded_state[origname].size():
+                print("Wrong parameter length: %s, model: %s, loaded: %s"%(origname, self_state[name].size(), loaded_state[origname].size()));
+                continue;
+
+            self_state[name].copy_(param);
+

+ 19 - 0
LICENSE.md

@@ -0,0 +1,19 @@
+Copyright (c) 2020-present Joon Son Chung
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 102 - 0
README.md

@@ -0,0 +1,102 @@
+# Face embedding trainer
+
+This repository contains a framework for training deep embeddings for face recognition. The trainer is intended for the face recognition exercise of the [Machine Learning for Visual Understanding](https://github.com/joonson/mlvu2020) course, but the code is flexible enough to be used for other meta-learning tasks. It is an adaptation of the [speaker recognition model trainer](https://github.com/clovaai/voxceleb_trainer) described in [_In defence of metric learning for speaker recognition_](https://arxiv.org/abs/2003.11982).
+
+### Dependencies
+```
+pip install -r requirements.txt
+```
+
+### Training examples
+
+- AM-Softmax:
+```
+python ./trainEmbedNet.py --model ResNet18 --trainfunc amsoftmax --save_path exps/exp1 --nClasses 2622 --batch_size 200 --scale 30 --margin 0.2
+```
+
+- Angular prototypical:
+```
+python ./trainEmbedNet.py --model ResNet18 --trainfunc angleproto --save_path exps/exp2 --nPerClass 2 --batch_size 200
+```
+
+The arguments can also be passed as `--config path_to_config.yaml`. Note that the configuration file overrides the arguments passed via command line.
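+
+A minimal example configuration (illustrative values; the keys are assumed to mirror the command-line flag names):
+```
+model: ResNet18
+trainfunc: amsoftmax
+save_path: exps/exp1
+nClasses: 2622
+batch_size: 200
+scale: 30
+margin: 0.2
+```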
+
+Use the `--mixedprec` flag to enable mixed precision training. This is recommended for Tesla V100, GeForce RTX 20 series or later GPUs.
+
+### Implemented loss functions
+```
+Softmax (softmax)
+AM-Softmax (amsoftmax)
+AAM-Softmax (aamsoftmax)
+GE2E (ge2e)
+Prototypical (proto)
+Triplet (triplet)
+Angular Prototypical (angleproto)
+Angular Prototypical + Softmax (softmaxproto)
+```
+
+For softmax-based losses, `nPerClass` should be 1, and `nClasses` must be specified. For metric-based losses, `nPerClass` should be 2 or more. For `softmaxproto`, `nPerClass` should be 2 and `nClasses` must also be specified.
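+
+For example, a `softmaxproto` run might look like this (the class count is illustrative):
+```
+python ./trainEmbedNet.py --model ResNet18 --trainfunc softmaxproto --save_path exps/exp3 --nPerClass 2 --nClasses 2622 --batch_size 200
+```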
+
+### Implemented models
+```
+ResNet18
+ResNeXt50
+```
+
+### Adding new models and loss functions
+
+You can add new models and loss functions to `models` and `loss` directories respectively. See the existing definitions for examples.
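+
+As a sketch, a new model file only needs to expose a `MainModel` factory that returns an `nn.Module` whose final layer outputs `nOut` dimensions, mirroring `models/ResNet18.py` (the MobileNetV2 backbone here is just an illustration):
+```
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torchvision
+
+def MainModel(nOut=256, **kwargs):
+    ## Any backbone works as long as its final FC layer outputs nOut dimensions
+    return torchvision.models.mobilenet_v2(num_classes=nOut)
+```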
+
+### Data
+
+The test list should contain labels and image pairs, one line per pair, as follows, where `1` denotes a target (same identity) and `0` an impostor:
+```
+1,id10001/00001.jpg,id10001/00002.jpg
+0,id10001/00003.jpg,id10002/00001.jpg
+```
+
+Each folder in the training set should contain the images of one identity (i.e. `identity/image.jpg`).
+
+The input transformations can be changed in the code.
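+
+For reference, a transform along the lines of the one used in `server.py` (ImageNet normalisation statistics) can be passed to the loader:
+```
+import torchvision.transforms as transforms
+
+transform = transforms.Compose([
+    transforms.ToTensor(),
+    transforms.Resize(256),
+    transforms.CenterCrop([224, 224]),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+])
+```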
+
+### Citation
+
+Please cite the following if you make use of the code.
+
+```
+@inproceedings{chung2020in,
+  title={In defence of metric learning for speaker recognition},
+  author={Chung, Joon Son and Huh, Jaesung and Mun, Seongkyu and Lee, Minjae and Heo, Hee Soo and Choe, Soyeon and Ham, Chiheon and Jung, Sunghwan and Lee, Bong-Jin and Han, Icksang},
+  booktitle={Interspeech},
+  year={2020}
+}
+```
+
+### Server
+
+```
+python ./server.py
+```
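+
+The accompanying webcam client can then be started with `python ./client.py`; the `HOST` address at the top of `client.py` must point to the machine running the server (the port is set via `--port`, default 10000).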
+
+### License
+```
+Copyright (c) 2020-present Joon Son Chung.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+```

+ 65 - 0
client.py

@@ -0,0 +1,65 @@
+import cv2
+import requests
+import pickle
+import json
+import sys
+
+
+HOST = 'http://192.168.100.92:10016'
+url = HOST + '/query'
+cam_port = 0
+cam = cv2.VideoCapture(cam_port)
+if not cam.isOpened():
+    print("Unable to read camera feed")
+    sys.exit()
+imgid = 0
+while True:
+    print('====Select mode====\n1. Query mode (default)\n2. Enroll mode\n3. Exit')
+    print('> ', end='')
+    mode = input()
+    mode = 1 if not mode.isnumeric() else int(mode)
+    if mode == 3:
+        break
+    elif mode == 2:
+        print("what's your name?")
+        iname = input()
+        print('Picture key is "e"')
+        while True:
+            result, image = cam.read()
+            cv2.imshow("GFG", image)
+            if cv2.waitKey(5) & 0xFF == ord('e'):
+                img = image.copy()
+                res = requests.post(HOST + '/enroll', pickle.dumps({'img': image, 'name': iname}))
+                print('enroll', res.text)
+                if not res.text == 'fail':
+                    cv2.destroyWindow("GFG")
+                    break
+    else:
+        print('※ Query mode started. Press "q" to exit ※')
+        while True:
+            result, image = cam.read()
+
+            img = image.copy()
+            res = requests.post(url, pickle.dumps({'img': img, 'name': ''}))
+            imgid += 1
+            if res.text == 'fail':
+                cv2.putText(image, "fail", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
+                cv2.imshow("GFG", image)
+            else:
+                info = json.loads(res.text)
+                x1 = info['x1']
+                y1 = info['y1']
+                x2 = info['x2']
+                y2 = info['y2']
+                score = round(info['score'] * 100, 1)
+                confi = info['file'] + ' ' + str(score) + '%' if info['file'] != 'Unknown' else info['file']
+                if result:
+                    cv2.putText(image, confi, (int(x2)+20, int(y2)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
+                    cv2.rectangle(image, (int(x1), int(y1)), (int(x2), int(y2)), (0, 0, 255), 3)
+                    cv2.imshow("GFG", image)
+                else:
+                    break
+            if cv2.waitKey(5) & 0xFF == ord('q'):
+                break
+        cv2.destroyWindow("GFG")
+cam.release()

BIN
detectors/__MACOSX/.___init__.py


BIN
detectors/__MACOSX/._s3fd


BIN
detectors/__MACOSX/s3fd/._.DS_Store


BIN
detectors/__MACOSX/s3fd/.___init__.py


BIN
detectors/__MACOSX/s3fd/.___pycache__


BIN
detectors/__MACOSX/s3fd/._box_utils.py


BIN
detectors/__MACOSX/s3fd/._nets.py


BIN
detectors/__MACOSX/s3fd/._weights


BIN
detectors/__MACOSX/s3fd/weights/._sfd_face.pth


+ 3 - 0
detectors/__init__.py

@@ -0,0 +1,3 @@
+## From https://github.com/cs-giung/face-detection-pytorch
+
+from .s3fd import S3FD

BIN
detectors/s3fd/.DS_Store


+ 61 - 0
detectors/s3fd/__init__.py

@@ -0,0 +1,61 @@
+import time
+import numpy as np
+import cv2
+import torch
+from torchvision import transforms
+from .nets import S3FDNet
+from .box_utils import nms_
+
+PATH_WEIGHT = './detectors/s3fd/weights/sfd_face.pth'
+img_mean = np.array([104., 117., 123.])[:, np.newaxis, np.newaxis].astype('float32')
+
+
+class S3FD():
+
+    def __init__(self, device='cuda'):
+
+        tstamp = time.time()
+        self.device = device
+
+        print('[S3FD] loading with', self.device)
+        self.net = S3FDNet(device=self.device).to(self.device)
+        state_dict = torch.load(PATH_WEIGHT, map_location=self.device)
+        self.net.load_state_dict(state_dict)
+        self.net.eval()
+        print('[S3FD] finished loading (%.4f sec)' % (time.time() - tstamp))
+    
+    def detect_faces(self, image, conf_th=0.8, scales=[1]):
+
+        w, h = image.shape[1], image.shape[0]
+
+        bboxes = np.empty(shape=(0, 5))
+
+        with torch.no_grad():
+            for s in scales:
+                scaled_img = cv2.resize(image, dsize=(0, 0), fx=s, fy=s, interpolation=cv2.INTER_LINEAR)
+
+                scaled_img = np.swapaxes(scaled_img, 1, 2)
+                scaled_img = np.swapaxes(scaled_img, 1, 0)
+                scaled_img = scaled_img[[2, 1, 0], :, :]
+                scaled_img = scaled_img.astype('float32')
+                scaled_img -= img_mean
+                scaled_img = scaled_img[[2, 1, 0], :, :]
+                x = torch.from_numpy(scaled_img).unsqueeze(0).to(self.device)
+                y = self.net(x)
+
+                detections = y.data
+                scale = torch.Tensor([w, h, w, h])
+
+                for i in range(detections.size(1)):
+                    j = 0
+                    while detections[0, i, j, 0] > conf_th:
+                        score = detections[0, i, j, 0]
+                        pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
+                        bbox = (pt[0], pt[1], pt[2], pt[3], score)
+                        bboxes = np.vstack((bboxes, bbox))
+                        j += 1
+
+            keep = nms_(bboxes, 0.1)
+            bboxes = bboxes[keep]
+
+        return bboxes

+ 217 - 0
detectors/s3fd/box_utils.py

@@ -0,0 +1,217 @@
+import numpy as np
+from itertools import product as product
+import torch
+from torch.autograd import Function
+
+
+def nms_(dets, thresh):
+    """
+    Courtesy of Ross Girshick
+    [https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/nms/py_cpu_nms.py]
+    """
+    x1 = dets[:, 0]
+    y1 = dets[:, 1]
+    x2 = dets[:, 2]
+    y2 = dets[:, 3]
+    scores = dets[:, 4]
+
+    areas = (x2 - x1) * (y2 - y1)
+    order = scores.argsort()[::-1]
+
+    keep = []
+    while order.size > 0:
+        i = order[0]
+        keep.append(int(i))
+        xx1 = np.maximum(x1[i], x1[order[1:]])
+        yy1 = np.maximum(y1[i], y1[order[1:]])
+        xx2 = np.minimum(x2[i], x2[order[1:]])
+        yy2 = np.minimum(y2[i], y2[order[1:]])
+
+        w = np.maximum(0.0, xx2 - xx1)
+        h = np.maximum(0.0, yy2 - yy1)
+        inter = w * h
+        ovr = inter / (areas[i] + areas[order[1:]] - inter)
+
+        inds = np.where(ovr <= thresh)[0]
+        order = order[inds + 1]
+
+    return np.array(keep).astype(int)    # np.int was removed in NumPy 1.24; use the builtin int
+
+
+def decode(loc, priors, variances):
+    """Decode locations from predictions using priors to undo
+    the encoding we did for offset regression at train time.
+    Args:
+        loc (tensor): location predictions for loc layers,
+            Shape: [num_priors,4]
+        priors (tensor): Prior boxes in center-offset form.
+            Shape: [num_priors,4].
+        variances: (list[float]) Variances of priorboxes
+    Return:
+        decoded bounding box predictions
+    """
+
+    boxes = torch.cat((
+        priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
+        priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
+    boxes[:, :2] -= boxes[:, 2:] / 2
+    boxes[:, 2:] += boxes[:, :2]
+    return boxes
+
+
+def nms(boxes, scores, overlap=0.5, top_k=200):
+    """Apply non-maximum suppression at test time to avoid detecting too many
+    overlapping bounding boxes for a given object.
+    Args:
+        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
+        scores: (tensor) The class predscores for the img, Shape:[num_priors].
+        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
+        top_k: (int) The Maximum number of box preds to consider.
+    Return:
+        The indices of the kept boxes with respect to num_priors.
+    """
+
+    keep = scores.new(scores.size(0)).zero_().long()
+    if boxes.numel() == 0:
+        return keep, 0
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+    area = torch.mul(x2 - x1, y2 - y1)
+    v, idx = scores.sort(0)  # sort in ascending order
+    # I = I[v >= 0.01]
+    idx = idx[-top_k:]  # indices of the top-k largest vals
+    xx1 = boxes.new()
+    yy1 = boxes.new()
+    xx2 = boxes.new()
+    yy2 = boxes.new()
+    w = boxes.new()
+    h = boxes.new()
+
+    # keep = torch.Tensor()
+    count = 0
+    while idx.numel() > 0:
+        i = idx[-1]  # index of current largest val
+        # keep.append(i)
+        keep[count] = i
+        count += 1
+        if idx.size(0) == 1:
+            break
+        idx = idx[:-1]  # remove kept element from view
+        # load bboxes of next highest vals
+        torch.index_select(x1, 0, idx, out=xx1)
+        torch.index_select(y1, 0, idx, out=yy1)
+        torch.index_select(x2, 0, idx, out=xx2)
+        torch.index_select(y2, 0, idx, out=yy2)
+        # store element-wise max with next highest score
+        xx1 = torch.clamp(xx1, min=x1[i])
+        yy1 = torch.clamp(yy1, min=y1[i])
+        xx2 = torch.clamp(xx2, max=x2[i])
+        yy2 = torch.clamp(yy2, max=y2[i])
+        w.resize_as_(xx2)
+        h.resize_as_(yy2)
+        w = xx2 - xx1
+        h = yy2 - yy1
+        # check sizes of xx1 and xx2.. after each iteration
+        w = torch.clamp(w, min=0.0)
+        h = torch.clamp(h, min=0.0)
+        inter = w * h
+        # IoU = i / (area(a) + area(b) - i)
+        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
+        union = (rem_areas - inter) + area[i]
+        IoU = inter / union  # store result in iou
+        # keep only elements with an IoU <= overlap
+        idx = idx[IoU.le(overlap)]
+    return keep, count
+
+
+class Detect(object):
+
+    def __init__(self, num_classes=2,
+                    top_k=750, nms_thresh=0.3, conf_thresh=0.05,
+                    variance=[0.1, 0.2], nms_top_k=5000):
+        
+        self.num_classes = num_classes
+        self.top_k = top_k
+        self.nms_thresh = nms_thresh
+        self.conf_thresh = conf_thresh
+        self.variance = variance
+        self.nms_top_k = nms_top_k
+
+    def forward(self, loc_data, conf_data, prior_data):
+
+        num = loc_data.size(0)
+        num_priors = prior_data.size(0)
+
+        conf_preds = conf_data.view(num, num_priors, self.num_classes).transpose(2, 1)
+        batch_priors = prior_data.view(-1, num_priors, 4).expand(num, num_priors, 4)
+        batch_priors = batch_priors.contiguous().view(-1, 4)
+
+        decoded_boxes = decode(loc_data.view(-1, 4), batch_priors, self.variance)
+        decoded_boxes = decoded_boxes.view(num, num_priors, 4)
+
+        output = torch.zeros(num, self.num_classes, self.top_k, 5)
+
+        for i in range(num):
+            boxes = decoded_boxes[i].clone()
+            conf_scores = conf_preds[i].clone()
+
+            for cl in range(1, self.num_classes):
+                c_mask = conf_scores[cl].gt(self.conf_thresh)
+                scores = conf_scores[cl][c_mask]
+                
+                if scores.dim() == 0:
+                    continue
+                l_mask = c_mask.unsqueeze(1).expand_as(boxes)
+                boxes_ = boxes[l_mask].view(-1, 4)
+                ids, count = nms(boxes_, scores, self.nms_thresh, self.nms_top_k)
+                count = count if count < self.top_k else self.top_k
+
+                output[i, cl, :count] = torch.cat((scores[ids[:count]].unsqueeze(1), boxes_[ids[:count]]), 1)
+
+        return output
+
+
+class PriorBox(object):
+
+    def __init__(self, input_size, feature_maps,
+                    variance=[0.1, 0.2],
+                    min_sizes=[16, 32, 64, 128, 256, 512],
+                    steps=[4, 8, 16, 32, 64, 128],
+                    clip=False):
+
+        super(PriorBox, self).__init__()
+
+        self.imh = input_size[0]
+        self.imw = input_size[1]
+        self.feature_maps = feature_maps
+
+        self.variance = variance
+        self.min_sizes = min_sizes
+        self.steps = steps
+        self.clip = clip
+
+    def forward(self):
+        mean = []
+        for k, fmap in enumerate(self.feature_maps):
+            feath = fmap[0]
+            featw = fmap[1]
+            for i, j in product(range(feath), range(featw)):
+                f_kw = self.imw / self.steps[k]
+                f_kh = self.imh / self.steps[k]
+
+                cx = (j + 0.5) / f_kw
+                cy = (i + 0.5) / f_kh
+
+                s_kw = self.min_sizes[k] / self.imw
+                s_kh = self.min_sizes[k] / self.imh
+
+                mean += [cx, cy, s_kw, s_kh]
+
+        output = torch.FloatTensor(mean).view(-1, 4)
+        
+        if self.clip:
+            output.clamp_(max=1, min=0)
+        
+        return output

+ 174 - 0
detectors/s3fd/nets.py

@@ -0,0 +1,174 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.nn.init as init
+from .box_utils import Detect, PriorBox
+
+
+class L2Norm(nn.Module):
+
+    def __init__(self, n_channels, scale):
+        super(L2Norm, self).__init__()
+        self.n_channels = n_channels
+        self.gamma = scale or None
+        self.eps = 1e-10
+        self.weight = nn.Parameter(torch.Tensor(self.n_channels))
+        self.reset_parameters()
+
+    def reset_parameters(self):
+        init.constant_(self.weight, self.gamma)
+
+    def forward(self, x):
+        norm = x.pow(2).sum(dim=1, keepdim=True).sqrt() + self.eps
+        x = torch.div(x, norm)
+        out = self.weight.unsqueeze(0).unsqueeze(2).unsqueeze(3).expand_as(x) * x
+        return out
+
+
+class S3FDNet(nn.Module):
+
+    def __init__(self, device='cuda'):
+        super(S3FDNet, self).__init__()
+        self.device = device
+
+        self.vgg = nn.ModuleList([
+            nn.Conv2d(3, 64, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(64, 64, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(2, 2),
+
+            nn.Conv2d(64, 128, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(128, 128, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(2, 2),
+            
+            nn.Conv2d(128, 256, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(256, 256, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(256, 256, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(2, 2, ceil_mode=True),
+            
+            nn.Conv2d(256, 512, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(512, 512, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(512, 512, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(2, 2),
+
+            nn.Conv2d(512, 512, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(512, 512, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(512, 512, 3, 1, padding=1),
+            nn.ReLU(inplace=True),
+            nn.MaxPool2d(2, 2),
+
+            nn.Conv2d(512, 1024, 3, 1, padding=6, dilation=6),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(1024, 1024, 1, 1),
+            nn.ReLU(inplace=True),
+        ])
+
+        self.L2Norm3_3 = L2Norm(256, 10)
+        self.L2Norm4_3 = L2Norm(512, 8)
+        self.L2Norm5_3 = L2Norm(512, 5)
+
+        self.extras = nn.ModuleList([
+            nn.Conv2d(1024, 256, 1, 1),
+            nn.Conv2d(256, 512, 3, 2, padding=1),
+            nn.Conv2d(512, 128, 1, 1),
+            nn.Conv2d(128, 256, 3, 2, padding=1),
+        ])
+        
+        self.loc = nn.ModuleList([
+            nn.Conv2d(256, 4, 3, 1, padding=1),
+            nn.Conv2d(512, 4, 3, 1, padding=1),
+            nn.Conv2d(512, 4, 3, 1, padding=1),
+            nn.Conv2d(1024, 4, 3, 1, padding=1),
+            nn.Conv2d(512, 4, 3, 1, padding=1),
+            nn.Conv2d(256, 4, 3, 1, padding=1),
+        ])
+
+        self.conf = nn.ModuleList([
+            nn.Conv2d(256, 4, 3, 1, padding=1),
+            nn.Conv2d(512, 2, 3, 1, padding=1),
+            nn.Conv2d(512, 2, 3, 1, padding=1),
+            nn.Conv2d(1024, 2, 3, 1, padding=1),
+            nn.Conv2d(512, 2, 3, 1, padding=1),
+            nn.Conv2d(256, 2, 3, 1, padding=1),
+        ])
+
+        self.softmax = nn.Softmax(dim=-1)
+        self.detect = Detect()
+
+    def forward(self, x):
+        size = x.size()[2:]
+        sources = list()
+        loc = list()
+        conf = list()
+
+        for k in range(16):
+            x = self.vgg[k](x)
+        s = self.L2Norm3_3(x)
+        sources.append(s)
+
+        for k in range(16, 23):
+            x = self.vgg[k](x)
+        s = self.L2Norm4_3(x)
+        sources.append(s)
+
+        for k in range(23, 30):
+            x = self.vgg[k](x)
+        s = self.L2Norm5_3(x)
+        sources.append(s)
+
+        for k in range(30, len(self.vgg)):
+            x = self.vgg[k](x)
+        sources.append(x)
+        
+        # apply extra layers and cache source layer outputs
+        for k, v in enumerate(self.extras):
+            x = F.relu(v(x), inplace=True)
+            if k % 2 == 1:
+                sources.append(x)
+
+        # apply multibox head to source layers
+        loc_x = self.loc[0](sources[0])
+        conf_x = self.conf[0](sources[0])
+
+        max_conf, _ = torch.max(conf_x[:, 0:3, :, :], dim=1, keepdim=True)
+        conf_x = torch.cat((max_conf, conf_x[:, 3:, :, :]), dim=1)
+
+        loc.append(loc_x.permute(0, 2, 3, 1).contiguous())
+        conf.append(conf_x.permute(0, 2, 3, 1).contiguous())
+
+        for i in range(1, len(sources)):
+            x = sources[i]
+            conf.append(self.conf[i](x).permute(0, 2, 3, 1).contiguous())
+            loc.append(self.loc[i](x).permute(0, 2, 3, 1).contiguous())
+
+        features_maps = []
+        for i in range(len(loc)):
+            feat = []
+            feat += [loc[i].size(1), loc[i].size(2)]
+            features_maps += [feat]
+
+        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
+        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)
+
+        with torch.no_grad():
+            self.priorbox = PriorBox(size, features_maps)
+            self.priors = self.priorbox.forward()
+
+        output = self.detect.forward(
+            loc.view(loc.size(0), -1, 4),
+            self.softmax(conf.view(conf.size(0), -1, 2)),
+            self.priors.type(type(x.data)).to(self.device)
+        )
+
+        return output

BIN
detectors/s3fd/weights/sfd_face.pth


+ 58 - 0
loss/aamsoftmax.py

@@ -0,0 +1,58 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+# Adapted from https://github.com/wujiyang/Face_Pytorch (Apache License)
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import time, pdb, numpy, math
+from utils import accuracy
+
+class LossFunction(nn.Module):
+    def __init__(self, nOut, nClasses, margin=0.3, scale=15, easy_margin=False, **kwargs):
+        super(LossFunction, self).__init__()
+
+        self.test_normalize = True
+        
+        self.m = margin
+        self.s = scale
+        self.in_feats = nOut
+        self.weight = torch.nn.Parameter(torch.FloatTensor(nClasses, nOut), requires_grad=True)
+        self.ce = nn.CrossEntropyLoss()
+        nn.init.xavier_normal_(self.weight, gain=1)
+
+        self.easy_margin = easy_margin
+        self.cos_m = math.cos(self.m)
+        self.sin_m = math.sin(self.m)
+
+        # make cos(theta+m) monotonically decreasing for theta in [0°, 180°]
+        self.th = math.cos(math.pi - self.m)
+        self.mm = math.sin(math.pi - self.m) * self.m
+
+        print('Initialised AAMSoftmax margin %.3f scale %.3f'%(self.m,self.s))
+
+    def forward(self, x, label=None):
+
+        assert x.size()[0] == label.size()[0]
+        assert x.size()[1] == self.in_feats
+        
+        # cos(theta)
+        cosine = F.linear(F.normalize(x), F.normalize(self.weight))
+        # cos(theta + m)
+        sine = torch.sqrt((1.0 - torch.mul(cosine, cosine)).clamp(0, 1))
+        phi = cosine * self.cos_m - sine * self.sin_m
+
+        if self.easy_margin:
+            phi = torch.where(cosine > 0, phi, cosine)
+        else:
+            phi = torch.where((cosine - self.th) > 0, phi, cosine - self.mm)
+
+        #one_hot = torch.zeros(cosine.size(), device='cuda' if torch.cuda.is_available() else 'cpu')
+        one_hot = torch.zeros_like(cosine)
+        one_hot.scatter_(1, label.view(-1, 1), 1)
+        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)
+        output = output * self.s
+
+        loss    = self.ce(output, label)
+        prec1   = accuracy(output.detach(), label.detach(), topk=(1,))[0]
+        return loss, prec1

+ 45 - 0
loss/amsoftmax.py

@@ -0,0 +1,45 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+# Adapted from https://github.com/CoinCheung/pytorch-loss (MIT License)
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import time, pdb, numpy
+from utils import accuracy
+
+class LossFunction(nn.Module):
+    def __init__(self, nOut, nClasses, margin=0.3, scale=15, **kwargs):
+        super(LossFunction, self).__init__()
+
+        self.test_normalize = True
+        
+        self.m = margin
+        self.s = scale
+        self.in_feats = nOut
+        self.W = torch.nn.Parameter(torch.randn(nOut, nClasses), requires_grad=True)
+        self.ce = nn.CrossEntropyLoss()
+        nn.init.xavier_normal_(self.W, gain=1)
+
+        print('Initialised AMSoftmax m=%.3f s=%.3f'%(self.m,self.s))
+
+    def forward(self, x, label=None):
+
+        assert x.size()[0] == label.size()[0]
+        assert x.size()[1] == self.in_feats
+
+        x_norm = torch.norm(x, p=2, dim=1, keepdim=True).clamp(min=1e-12)
+        x_norm = torch.div(x, x_norm)
+        w_norm = torch.norm(self.W, p=2, dim=0, keepdim=True).clamp(min=1e-12)
+        w_norm = torch.div(self.W, w_norm)
+        costh = torch.mm(x_norm, w_norm)
+        label_view = label.view(-1, 1)
+        if label_view.is_cuda: label_view = label_view.cpu()
+        delt_costh = torch.zeros(costh.size()).scatter_(1, label_view, self.m)
+        if x.is_cuda: delt_costh = delt_costh.cuda()
+        costh_m = costh - delt_costh
+        costh_m_s = self.s * costh_m
+        loss    = self.ce(costh_m_s, label)
+        prec1   = accuracy(costh_m_s.detach(), label.detach(), topk=(1,))[0]
+        return loss, prec1
+

+ 39 - 0
loss/angleproto.py

@@ -0,0 +1,39 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import time, pdb, numpy
+from utils import accuracy
+
+class LossFunction(nn.Module):
+
+    def __init__(self, init_w=10.0, init_b=-5.0, **kwargs):
+        super(LossFunction, self).__init__()
+
+        self.test_normalize = True
+        
+        self.w = nn.Parameter(torch.tensor(init_w))
+        self.b = nn.Parameter(torch.tensor(init_b))
+        self.criterion  = torch.nn.CrossEntropyLoss()
+
+        print('Initialised AngleProto')
+
+    def forward(self, x, label=None):
+
+        assert x.size()[1] >= 2
+
+        out_anchor      = torch.mean(x[:,1:,:],1)
+        out_positive    = x[:,0,:]
+        stepsize        = out_anchor.size()[0]
+
+        cos_sim_matrix  = F.cosine_similarity(out_positive.unsqueeze(-1),out_anchor.unsqueeze(-1).transpose(0,2))
+        with torch.no_grad():
+            self.w.clamp_(1e-6)    ## clamp in place; a bare torch.clamp(self.w, 1e-6) discards its result
+        cos_sim_matrix = cos_sim_matrix * self.w + self.b
+        
+        label   = torch.from_numpy(numpy.asarray(range(0,stepsize))).cuda()
+        nloss   = self.criterion(cos_sim_matrix, label)
+        prec1   = accuracy(cos_sim_matrix.detach(), label.detach(), topk=(1,))[0]
+
+        return nloss, prec1

+ 53 - 0
loss/ge2e.py

@@ -0,0 +1,53 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+## Fast re-implementation of the GE2E loss (https://arxiv.org/abs/1710.10467) 
+## Numerically checked against https://github.com/cvqluu/GE2E-Loss
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import time, pdb, numpy
+from utils import accuracy
+
+class LossFunction(nn.Module):
+
+    def __init__(self, init_w=10.0, init_b=-5.0, **kwargs):
+        super(LossFunction, self).__init__()
+
+        self.test_normalize = True
+        
+        self.w = nn.Parameter(torch.tensor(init_w))
+        self.b = nn.Parameter(torch.tensor(init_b))
+        self.criterion  = torch.nn.CrossEntropyLoss()
+
+        print('Initialised GE2E')
+
+    def forward(self, x, label=None):
+
+        assert x.size()[1] >= 2
+
+        gsize = x.size()[1]
+        centroids = torch.mean(x, 1)
+        stepsize = x.size()[0]
+
+        cos_sim_matrix = []
+
+        for ii in range(0,gsize): 
+            idx = [*range(0,gsize)]
+            idx.remove(ii)
+            exc_centroids = torch.mean(x[:,idx,:], 1)
+            cos_sim_diag    = F.cosine_similarity(x[:,ii,:],exc_centroids)
+            cos_sim         = F.cosine_similarity(x[:,ii,:].unsqueeze(-1),centroids.unsqueeze(-1).transpose(0,2))
+            cos_sim[range(0,stepsize),range(0,stepsize)] = cos_sim_diag
+            cos_sim_matrix.append(torch.clamp(cos_sim,1e-6))
+
+        cos_sim_matrix = torch.stack(cos_sim_matrix,dim=1)
+
+        with torch.no_grad():
+            self.w.clamp_(1e-6)    ## clamp in place; a bare torch.clamp(self.w, 1e-6) discards its result
+        cos_sim_matrix = cos_sim_matrix * self.w + self.b
+        
+        label = torch.from_numpy(numpy.asarray(range(0,stepsize))).cuda()
+        nloss = self.criterion(cos_sim_matrix.view(-1,stepsize), torch.repeat_interleave(label,repeats=gsize,dim=0).cuda())
+        prec1 = accuracy(cos_sim_matrix.view(-1,stepsize).detach(), torch.repeat_interleave(label,repeats=gsize,dim=0).detach(), topk=(1,))[0]
+
+        return nloss, prec1

+ 36 - 0
loss/proto.py

@@ -0,0 +1,36 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+## Re-implementation of prototypical networks (https://arxiv.org/abs/1703.05175).
+## Numerically checked against https://github.com/cyvius96/prototypical-network-pytorch
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import time, pdb, numpy
+from utils import accuracy
+
+class LossFunction(nn.Module):
+
+    def __init__(self, **kwargs):
+        super(LossFunction, self).__init__()
+
+        self.test_normalize = False
+
+        self.criterion  = torch.nn.CrossEntropyLoss()
+
+        print('Initialised Prototypical Loss')
+
+    def forward(self, x, label=None):
+
+        assert x.size()[1] >= 2
+        
+        out_anchor      = torch.mean(x[:,1:,:],1)
+        out_positive    = x[:,0,:]
+        stepsize        = out_anchor.size()[0]
+
+        output  = -1 * (F.pairwise_distance(out_positive.unsqueeze(-1),out_anchor.unsqueeze(-1).transpose(0,2))**2)
+        label   = torch.from_numpy(numpy.asarray(range(0,stepsize))).cuda()
+        nloss   = self.criterion(output, label)
+        prec1   = accuracy(output.detach(), label.detach(), topk=(1,))[0]
+
+        return nloss, prec1

+ 27 - 0
loss/softmax.py

@@ -0,0 +1,27 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import time, pdb, numpy
+from utils import accuracy
+
+class LossFunction(nn.Module):
+	def __init__(self, nOut, nClasses, **kwargs):
+	    super(LossFunction, self).__init__()
+
+	    self.test_normalize = True
+	    
+	    self.criterion  = torch.nn.CrossEntropyLoss()
+	    self.fc 		= nn.Linear(nOut,nClasses)
+
+	    print('Initialised Softmax Loss')
+
+	def forward(self, x, label=None):
+
+		x 		= self.fc(x)
+		nloss   = self.criterion(x, label)
+		prec1	= accuracy(x.detach(), label.detach(), topk=(1,))[0]
+
+		return nloss, prec1

+ 29 - 0
loss/softmaxproto.py

@@ -0,0 +1,29 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+import torch.nn as nn
+import loss.softmax as softmax
+import loss.angleproto as angleproto
+
+class LossFunction(nn.Module):
+
+    def __init__(self, **kwargs):
+        super(LossFunction, self).__init__()
+
+        self.test_normalize = True
+
+        self.softmax = softmax.LossFunction(**kwargs)
+        self.angleproto = angleproto.LossFunction(**kwargs)
+
+        print('Initialised SoftmaxPrototypical Loss')
+
+    def forward(self, x, label=None):
+
+        assert x.size()[1] == 2
+
+        nlossS, prec1   = self.softmax(x.reshape(-1,x.size()[-1]), label.repeat_interleave(2))
+
+        nlossP, _       = self.angleproto(x,None)
+
+        return nlossS+nlossP, prec1

+ 85 - 0
loss/triplet.py

@@ -0,0 +1,85 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import time, pdb, numpy
+from utils import tuneThresholdfromScore
+import random
+
+class LossFunction(nn.Module):
+
+    def __init__(self, hard_rank=0, hard_prob=0, margin=0, **kwargs):
+        super(LossFunction, self).__init__()
+
+        self.test_normalize = True
+        
+        self.hard_rank  = hard_rank
+        self.hard_prob  = hard_prob
+        self.margin     = margin
+
+        print('Initialised Triplet Loss')
+
+    def forward(self, x, label=None):
+
+        assert x.size()[1] == 2
+        
+        out_anchor      = F.normalize(x[:,0,:], p=2, dim=1)
+        out_positive    = F.normalize(x[:,1,:], p=2, dim=1)
+        stepsize        = out_anchor.size()[0]
+
+        output      = -1 * (F.pairwise_distance(out_anchor.unsqueeze(-1),out_positive.unsqueeze(-1).transpose(0,2))**2)
+
+        negidx      = self.mineHardNegative(output.detach())
+
+        out_negative = out_positive[negidx,:]
+
+        ## calculate distances
+        pos_dist    = F.pairwise_distance(out_anchor,out_positive)
+        neg_dist    = F.pairwise_distance(out_anchor,out_negative)
+
+        ## loss function
+        nloss   = torch.mean(F.relu(torch.pow(pos_dist, 2) - torch.pow(neg_dist, 2) + self.margin))
+
+        return nloss, nloss
+
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+    ## Hard negative mining
+    ## ===== ===== ===== ===== ===== ===== ===== =====
+
+    def mineHardNegative(self, output):
+
+        negidx = []
+
+        for idx, similarity in enumerate(output):
+
+            simval, simidx = torch.sort(similarity,descending=True)
+
+            if self.hard_rank < 0:
+
+                ## Semi hard negative mining
+
+                semihardidx = simidx[(similarity[idx] - self.margin < simval) &  (simval < similarity[idx])]
+
+                if len(semihardidx) == 0:
+                    negidx.append(random.choice(simidx))
+                else:
+                    negidx.append(random.choice(semihardidx))
+
+            else:
+
+                ## Rank based negative mining
+                
+                simidx = simidx[simidx!=idx]
+
+                if random.random() < self.hard_prob:
+                    negidx.append(simidx[random.randint(0, self.hard_rank)])
+                else:
+                    negidx.append(random.choice(simidx))
+
+        return negidx

+ 8 - 0
models/ResNeXt50.py

@@ -0,0 +1,8 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torchvision
+
+def MainModel(nOut=256, **kwargs):
+    
+    return torchvision.models.resnext50_32x4d(num_classes=nOut)

+ 8 - 0
models/ResNet18.py

@@ -0,0 +1,8 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torchvision
+
+def MainModel(nOut=256, **kwargs):
+    
+    return torchvision.models.resnet18(num_classes=nOut)

BIN
models/amsoft_model.model


+ 10 - 0
optimizer/adam.py

@@ -0,0 +1,10 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+
+def Optimizer(parameters, lr, weight_decay, **kwargs):
+
+	print('Initialised Adam optimizer')
+
+	return torch.optim.Adam(parameters, lr = lr, weight_decay = weight_decay);

+ 10 - 0
optimizer/sgd.py

@@ -0,0 +1,10 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+
+def Optimizer(parameters, lr, weight_decay, **kwargs):
+
+	print('Initialised SGD optimizer')
+
+	return torch.optim.SGD(parameters, lr = lr, momentum = 0.9, weight_decay=weight_decay);

+ 5 - 0
requirements.txt

@@ -0,0 +1,5 @@
+torch>=1.6.0
+numpy
+scikit-learn
+pyyaml
+pillow

+ 14 - 0
scheduler/steplr.py

@@ -0,0 +1,14 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import torch
+
+def Scheduler(optimizer, test_interval, max_epoch, lr_decay, **kwargs):
+
+	sche_fn = torch.optim.lr_scheduler.StepLR(optimizer, step_size=test_interval, gamma=lr_decay)
+
+	lr_step = 'epoch'
+
+	print('Initialised step LR scheduler')
+
+	return sche_fn, lr_step

+ 207 - 0
server.py

@@ -0,0 +1,207 @@
+
+import os
+from utils import *
+from EmbedNet import *
+import torchvision.transforms as transforms
+from detectors import S3FD
+import argparse
+
+def createParser():
+    parser = argparse.ArgumentParser(description = "FaceNet");
+
+    parser.add_argument('--config',         type=str,   default=None,   help='Config YAML file');
+
+    ## Data loader
+    parser.add_argument('--batch_size',         type=int, default=200,	help='Batch size, number of classes per batch');
+    parser.add_argument('--max_img_per_cls',    type=int, default=500,	help='Maximum number of images per class per epoch');
+    parser.add_argument('--nDataLoaderThread',  type=int, default=5, 	help='Number of loader threads');
+
+    ## Training details
+    parser.add_argument('--test_interval',  type=int,   default=5,     help='Test and save every [test_interval] epochs');
+    parser.add_argument('--max_epoch',      type=int,   default=100,    help='Maximum number of epochs');
+    parser.add_argument('--trainfunc',      type=str,   default="softmax",  help='Loss function');
+
+    ## Optimizer
+    parser.add_argument('--optimizer',      type=str,   default="adam", help='sgd or adam');
+    parser.add_argument('--scheduler',      type=str,   default="steplr", help='Learning rate scheduler');
+    parser.add_argument('--lr',             type=float, default=0.001,  help='Learning rate');
+    parser.add_argument("--lr_decay",       type=float, default=0.90,   help='Learning rate decay every [test_interval] epochs');
+    parser.add_argument('--weight_decay',   type=float, default=0,      help='Weight decay in the optimizer');
+
+    ## Loss functions
+    parser.add_argument("--hard_prob",      type=float, default=0.5,    help='Hard negative mining probability, otherwise random, only for some loss functions');
+    parser.add_argument("--hard_rank",      type=int,   default=10,     help='Hard negative mining rank in the batch, only for some loss functions');
+    parser.add_argument('--margin',         type=float, default=0.1,    help='Loss margin, only for some loss functions');
+    parser.add_argument('--scale',          type=float, default=30,     help='Loss scale, only for some loss functions');
+    parser.add_argument('--nPerClass',      type=int,   default=1,      help='Number of images per class per batch, only for metric learning based losses');
+    parser.add_argument('--nClasses',       type=int,   default=8700,   help='Number of classes in the softmax layer, only for softmax-based losses');
+
+    ## Load and save
+    parser.add_argument('--initial_model',  type=str,   default="./models/amsoft_model.model",     help='Initial model weights');
+    parser.add_argument('--save_path',      type=str,   default="exps/exp1", help='Path for model and logs');
+
+    ## Training and test data
+    parser.add_argument('--train_path',     type=str,   default="data/vggface2", help='Absolute path to the train set');
+    parser.add_argument('--train_ext',      type=str,   default="jpg",          help='Training files extension');
+    parser.add_argument('--test_path',      type=str,   default="data/test",    help='Absolute path to the test set');
+    parser.add_argument('--test_list',      type=str,   default="data/test_list.csv",   help='Evaluation list');
+
+    ## Model definition
+    parser.add_argument('--model',          type=str,   default="ResNet18", help='Name of model definition');
+    parser.add_argument('--nOut',           type=int,   default=512,        help='Embedding size in the last FC layer');
+
+    ## For test only
+    parser.add_argument('--eval',           dest='eval', action='store_true', help='Eval only')
+
+    ## For server
+    parser.add_argument('--server',             dest='server',  action='store_true',    help='Server mode')
+    parser.add_argument('--feat_save_path',     type=str,       default='saved_feats',  help='Absolute path to the feature')
+    parser.add_argument('--port',               type=int,       default=10000,          help='Port for the server')
+
+    ## Distributed and mixed precision training
+    parser.add_argument('--mixedprec',      dest='mixedprec',   action='store_true', help='Enable mixed precision training')
+
+    args = parser.parse_args()
+
+    return args
+
+def loadParameters(model, path):
+    state = model.state_dict()
+    loaded_state = torch.load(path)
+
+    for name, param in loaded_state.items():
+        origname = name;
+        if name not in state:
+            print("%s is not in the model."%origname);
+            continue;
+
+        if state[name].size() != loaded_state[origname].size():
+            print("Wrong parameter length: %s, model: %s, loaded: %s"%(origname, state[name].size(), loaded_state[origname].size()));
+            continue;
+
+        state[name].copy_(param);
+
+DET = S3FD(device='cuda')
+app = Flask(__name__)
+args = createParser()
+UNKNOWN_THRESHOLD = 0.5
+
+
+s = EmbedNet(**vars(args)).cuda()
+transform = transforms.Compose(
+        [transforms.ToTensor(),
+         transforms.Resize(256),
+         transforms.CenterCrop([224,224]),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+# trainer  = ModelTrainer(s, **vars(args))
+
+loadParameters(s, args.initial_model)
+s.eval()
+
+
+@app.route('/query', methods=['POST'])
+def query():
+    # unpack the received data
+    data = pickle.loads(request.get_data())
+
+    image = data['img']
+    image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+    bboxes = DET.detect_faces(image_np, conf_th=0.9, scales=[0.5])
+
+    if len(bboxes) != 1:
+        return "fail"
+
+    bsi = 100
+
+    sx = int((bboxes[0][0]+bboxes[0][2])/2) + bsi
+    sy = int((bboxes[0][1]+bboxes[0][3])/2) + bsi
+    ss = int(max((bboxes[0][3]-bboxes[0][1]),(bboxes[0][2]-bboxes[0][0]))/2)
+
+    image = numpy.pad(image,((bsi,bsi),(bsi,bsi),(0,0)), 'constant', constant_values=(110,110))
+
+    face = image[int(sy-ss):int(sy+ss),int(sx-ss):int(sx+ss)]
+    face = cv2.resize(face,(240,240))
+
+    im1 = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
+
+    inp1 = transform(im1).cuda()
+
+    com_feat = s(inp1).detach().cpu()
+
+    files = glob.glob(os.path.join(args.feat_save_path, '*.pt'))
+    
+    max_score = 0
+    pname = 'none'
+    for file in files:
+
+        ref_feat = torch.load(file)
+
+        score = F.cosine_similarity(ref_feat, com_feat)
+        if(score>max_score) :
+            max_score = score.item()
+            pname = file.split('/')[1].split('.')[0]
+
+        print('{} {:.2f}'.format(file,score.item()))
+
+    if max_score < UNKNOWN_THRESHOLD:
+        max_score = 0
+        pname = "Unknown"
+            
+    return {
+        "file":pname,
+        "score":max_score,
+        "x1":bboxes[0][0],
+        "y1":bboxes[0][1],
+        "x2":bboxes[0][2],
+        "y2":bboxes[0][3]
+    }
+
+@app.route('/enroll', methods=['POST'])
+def enroll():
+    # unpack the received data
+    data = pickle.loads(request.get_data())
+
+    iname = data['name']
+
+    image = data['img']
+    image_np = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+
+    bboxes = DET.detect_faces(image_np, conf_th=0.9, scales=[0.5])
+
+    # Require exactly one detected face, as in /query
+    if len(bboxes) != 1:
+        return "fail"
+
+    bsi = 100
+
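+    # Same padded centre-crop geometry as in /query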
+    sx = int((bboxes[0][0]+bboxes[0][2])/2) + bsi
+    sy = int((bboxes[0][1]+bboxes[0][3])/2) + bsi
+    ss = int(max((bboxes[0][3]-bboxes[0][1]),(bboxes[0][2]-bboxes[0][0]))/2)
+
+    image = numpy.pad(image,((bsi,bsi),(bsi,bsi),(0,0)), 'constant', constant_values=(110,110))
+
+    face = image[int(sy-ss):int(sy+ss),int(sx-ss):int(sx+ss)]
+    face = cv2.resize(face,(240,240))
+
+    # TO-DO / 2022-08-25
+    # 0. Client requirement: during enrolment the client keeps sending images until it stops;
+    #    the input value (name) is assumed to be unique, with no duplicates
+    # 1. Save the images in a per-person folder
+    # 2. Avoid duplicate images when saving
+    # 3. Extract one feature per person as the centroid of that person's image features
+    #    (a sketch follows this function)
+
+    if not os.path.exists(args.feat_save_path):
+        os.makedirs(args.feat_save_path)
+
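+    # Save both the cropped face (jpg, for inspection) and its embedding (pt, used for matching)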
+    cv2.imwrite(os.path.join(args.feat_save_path, '{}.jpg'.format(iname)),face)
+    im1 = Image.fromarray(cv2.cvtColor(face, cv2.COLOR_BGR2RGB))
+
+    inp1 = transform(im1).cuda()
+
+    ref_feat = s(inp1).detach().cpu()
+
+    torch.save(ref_feat, os.path.join(args.feat_save_path,'{}.pt'.format(iname)))
+
+    return "success"
+
+
+if __name__ == "__main__":
+    app.run(host='0.0.0.0', debug=True, port=args.port, threaded=False)
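+
+# Minimal client sketch for these endpoints (hypothetical host; assumes the
+# `requests` package and an OpenCV BGR frame, matching the pickle payloads above):
+#
+#   import pickle, cv2, requests
+#   frame = cv2.imread('face.jpg')                                  # BGR image
+#   requests.post('http://localhost:10000/enroll',
+#                 data=pickle.dumps({'name': 'alice', 'img': frame}))
+#   r = requests.post('http://localhost:10000/query',
+#                     data=pickle.dumps({'img': frame}))
+#   print(r.text)                                                   # "fail" or match JSON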

+ 196 - 0
trainEmbedNet.py

@@ -0,0 +1,196 @@
+#!/usr/bin/python
+#-*- coding: utf-8 -*-
+
+import sys, time, os, argparse, socket
+import yaml
+import pdb
+import glob
+import datetime
+from utils import *
+from EmbedNet import *
+from DatasetLoader import get_data_loader
+import torchvision.transforms as transforms
+
+# ## ===== ===== ===== ===== ===== ===== ===== =====
+# ## Parse arguments
+# ## ===== ===== ===== ===== ===== ===== ===== =====
+
+parser = argparse.ArgumentParser(description = "FaceNet");
+
+parser.add_argument('--config',         type=str,   default=None,   help='Config YAML file');
+
+## Data loader
+parser.add_argument('--batch_size',         type=int, default=200,	help='Batch size, number of classes per batch');
+parser.add_argument('--max_img_per_cls',    type=int, default=500,	help='Maximum number of images per class per epoch');
+parser.add_argument('--nDataLoaderThread',  type=int, default=5, 	help='Number of loader threads');
+
+## Training details
+parser.add_argument('--test_interval',  type=int,   default=5,     help='Test and save every [test_interval] epochs');
+parser.add_argument('--max_epoch',      type=int,   default=100,    help='Maximum number of epochs');
+parser.add_argument('--trainfunc',      type=str,   default="softmax",  help='Loss function');
+
+## Optimizer
+parser.add_argument('--optimizer',      type=str,   default="adam", help='sgd or adam');
+parser.add_argument('--scheduler',      type=str,   default="steplr", help='Learning rate scheduler');
+parser.add_argument('--lr',             type=float, default=0.001,  help='Learning rate');
+parser.add_argument("--lr_decay",       type=float, default=0.90,   help='Learning rate decay every [test_interval] epochs');
+parser.add_argument('--weight_decay',   type=float, default=0,      help='Weight decay in the optimizer');
+
+## Loss functions
+parser.add_argument("--hard_prob",      type=float, default=0.5,    help='Hard negative mining probability, otherwise random, only for some loss functions');
+parser.add_argument("--hard_rank",      type=int,   default=10,     help='Hard negative mining rank in the batch, only for some loss functions');
+parser.add_argument('--margin',         type=float, default=0.1,    help='Loss margin, only for some loss functions');
+parser.add_argument('--scale',          type=float, default=30,     help='Loss scale, only for some loss functions');
+parser.add_argument('--nPerClass',      type=int,   default=1,      help='Number of images per class per batch, only for metric learning based losses');
+parser.add_argument('--nClasses',       type=int,   default=8700,   help='Number of classes in the softmax layer, only for softmax-based losses');
+
+## Load and save
+parser.add_argument('--initial_model',  type=str,   default="./models/amsoft_model.model",     help='Initial model weights');
+parser.add_argument('--save_path',      type=str,   default="exps/exp1", help='Path for model and logs');
+
+## Training and test data
+parser.add_argument('--train_path',     type=str,   default="data/vggface2", help='Absolute path to the train set');
+parser.add_argument('--train_ext',      type=str,   default="jpg",          help='Training files extension');
+parser.add_argument('--test_path',      type=str,   default="data/test",    help='Absolute path to the test set');
+parser.add_argument('--test_list',      type=str,   default="data/test_list.csv",   help='Evaluation list');
+
+## Model definition
+parser.add_argument('--model',          type=str,   default="ResNet18", help='Name of model definition');
+parser.add_argument('--nOut',           type=int,   default=512,        help='Embedding size in the last FC layer');
+
+## For test only
+parser.add_argument('--eval',           dest='eval',    action='store_true', help='Eval only')
+
+## Distributed and mixed precision training
+parser.add_argument('--mixedprec',      dest='mixedprec',   action='store_true', help='Enable mixed precision training')
+
+args = parser.parse_args();
+
+## Parse YAML
+def find_option_type(key, parser):
+    for opt in parser._get_optional_actions():
+        if ('--' + key) in opt.option_strings:
+            return opt.type
+    raise ValueError
+
+if args.config is not None:
+    with open(args.config, "r") as f:
+        yml_config = yaml.load(f, Loader=yaml.FullLoader)
+    for k, v in yml_config.items():
+        if k in args.__dict__:
+            typ = find_option_type(k, parser)
+            args.__dict__[k] = typ(v)
+        else:
+            sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
+
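+# Example YAML config (illustrative values; keys must match the argparse options,
+# and each value is cast with that option's declared type):
+#
+#   lr: 0.0005
+#   batch_size: 100
+#   max_epoch: 50
+#   trainfunc: softmax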
+
+# ## ===== ===== ===== ===== ===== ===== ===== =====
+# ## Trainer script
+# ## ===== ===== ===== ===== ===== ===== ===== =====
+
+def main_worker(args):
+
+    ## Load models
+    s = EmbedNet(**vars(args)).cuda();
+
+    it          = 1
+
+    ## Write args to scorefile
+    scorefile = open(args.result_save_path+"/scores.txt", "a+");
+
+    strtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    scorefile.write('%s\n%s\n'%(strtime,args))
+    scorefile.flush()
+
+    ## Input transformations for training
+    train_transform = transforms.Compose(
+        [transforms.ToTensor(),
+         transforms.Resize(256),
+         transforms.RandomCrop([224,224]),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+    ## Input transformations for evaluation
+    test_transform = transforms.Compose(
+        [transforms.ToTensor(),
+         transforms.Resize(256),
+         transforms.CenterCrop([224,224]),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+    ## Initialise trainer and data loader
+    trainLoader = get_data_loader(transform=train_transform, **vars(args));
+    trainer     = ModelTrainer(s, **vars(args))
+
+    ## Load model weights
+    modelfiles = glob.glob('%s/model0*.model'%args.model_save_path)
+    modelfiles.sort()
+
+    ## If the target directory already exists, start from the existing file
+    if len(modelfiles) >= 1:
+        trainer.loadParameters(modelfiles[-1]);
+        print("Model %s loaded from previous state!"%modelfiles[-1]);
+        it = int(os.path.splitext(os.path.basename(modelfiles[-1]))[0][5:]) + 1
+    elif(args.initial_model != ""):
+        trainer.loadParameters(args.initial_model);
+        print("Model %s loaded!"%args.initial_model);
+
+    ## If the current iteration is not 1, update the scheduler
+    for ii in range(1,it):
+        trainer.__scheduler__.step()
+    
+    ## Evaluation code 
+    if args.eval:
+
+        sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
+        result = tuneThresholdfromScore(sc, lab, [1, 0.1]);
+
+        print('EER %2.4f'%(result[1]))
+        quit();
+
+    ## Core training script
+    for it in range(it,args.max_epoch+1):
+
+        clr = [x['lr'] for x in trainer.__optimizer__.param_groups]
+
+        print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Training epoch %d with LR %f "%(it,max(clr)));
+
+        loss, traineer = trainer.train_network(trainLoader, verbose=True);
+
+        if it % args.test_interval == 0:
+            
+            sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
+            result = tuneThresholdfromScore(sc, lab, [1, 0.1]);
+
+            print("IT %d, VEER %2.4f"%(it, result[1]));
+            scorefile.write("IT %d, VEER %2.4f\n"%(it, result[1]));
+
+            trainer.saveParameters(args.model_save_path+"/model%09d.model"%it);
+
+        print(time.strftime("%Y-%m-%d %H:%M:%S"), "TEER/TAcc %2.2f, TLOSS %f"%( traineer, loss));
+        scorefile.write("IT %d, TEER/TAcc %2.2f, TLOSS %f\n"%(it, traineer, loss));
+
+        scorefile.flush()
+
+    scorefile.close();
+
+
+# ## ===== ===== ===== ===== ===== ===== ===== =====
+# ## Main function
+# ## ===== ===== ===== ===== ===== ===== ===== =====
+
+
+def main():
+
+    args.model_save_path     = args.save_path+"/model"
+    args.result_save_path    = args.save_path+"/result"
+
+    if not os.path.exists(args.model_save_path):
+        os.makedirs(args.model_save_path)
+
+    if not os.path.exists(args.result_save_path):
+        os.makedirs(args.result_save_path)
+
+    main_worker(args)
+
+
+if __name__ == '__main__':
+    main()
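+
+# Typical invocations (illustrative paths):
+#
+#   python trainEmbedNet.py --train_path data/vggface2 --test_path data/test \
+#       --test_list data/test_list.csv --save_path exps/exp1
+#   python trainEmbedNet.py --eval --initial_model exps/exp1/model/model000000010.model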

+ 43 - 0
utils.py

@@ -0,0 +1,43 @@
+#! /usr/bin/python
+# -*- encoding: utf-8 -*-
+
+import numpy
+import torch
+import torch.nn.functional as F
+from sklearn import metrics
+from operator import itemgetter
+
+def accuracy(output, target, topk=(1,)):
+    """Computes the precision@k for the specified values of k"""
+    maxk = max(topk)
+    batch_size = target.size(0)
+
+    _, pred = output.topk(maxk, 1, True, True)
+    pred = pred.t()
+    correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+    res = []
+    for k in topk:
+        # reshape rather than view: the transposed tensor is not contiguous
+        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
+        res.append(correct_k.mul_(100.0 / batch_size))
+    return res
+
+def tuneThresholdfromScore(scores, labels, target_fa, target_fr = None):
+    
+    fpr, tpr, thresholds = metrics.roc_curve(labels, scores, pos_label=1)
+    fnr = 1 - tpr
+
+    tunedThreshold = [];
+    if target_fr:
+        for tfr in target_fr:
+            idx = numpy.nanargmin(numpy.absolute((tfr - fnr)))
+            tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]);
+    
+    for tfa in target_fa:
+        idx = numpy.nanargmin(numpy.absolute((tfa - fpr))) # numpy.where(fpr<=tfa)[0][-1]
+        tunedThreshold.append([thresholds[idx], fpr[idx], fnr[idx]]);
+    
+    idxE = numpy.nanargmin(numpy.absolute((fnr - fpr)))
+    eer  = max(fpr[idxE],fnr[idxE])*100
+    
+    return (tunedThreshold, eer, fpr, fnr);
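+
+# Quick sanity check for tuneThresholdfromScore (made-up scores and labels):
+#
+#   import numpy
+#   scores = numpy.array([0.9, 0.8, 0.3, 0.1])
+#   labels = numpy.array([1, 1, 0, 0])
+#   thr, eer, fpr, fnr = tuneThresholdfromScore(scores, labels, [1, 0.1])
+#   print('EER %.2f%%' % eer)   # 0.00 for this perfectly separable toy set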