@@ -11,6 +11,9 @@ from EmbedNet import *
 from DatasetLoader import get_data_loader
 import torchvision.transforms as transforms
 
+import mlflow
+
+
 # ## ===== ===== ===== ===== ===== ===== ===== =====
 # ## Parse arguments
 # ## ===== ===== ===== ===== ===== ===== ===== =====
@@ -90,87 +93,99 @@ if args.config is not None:
 
 def main_worker(args):
 
-    ## Load models
-    s = EmbedNet(**vars(args)).cuda();
-
-    it = 1
-
-    ## Write args to scorefile
-    scorefile = open(args.result_save_path+"/scores.txt", "a+");
-
-    strtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
-    scorefile.write('%s\n%s\n'%(strtime,args))
-    scorefile.flush()
-
-    ## Input transformations for training
-    train_transform = transforms.Compose(
-        [transforms.ToTensor(),
-         transforms.Resize(256),
-         transforms.RandomCrop([224,224]),
-         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
-
-    ## Input transformations for evaluation
-    test_transform = transforms.Compose(
-        [transforms.ToTensor(),
-         transforms.Resize(256),
-         transforms.CenterCrop([224,224]),
-         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
-
-    ## Initialise trainer and data loader
-    trainLoader = get_data_loader(transform=train_transform, **vars(args));
-    trainer = ModelTrainer(s, **vars(args))
-
-    ## Load model weights
-    modelfiles = glob.glob('%s/model0*.model'%args.model_save_path)
-    modelfiles.sort()
-
-    ## If the target directory already exists, start from the existing file
-    if len(modelfiles) >= 1:
-        trainer.loadParameters(modelfiles[-1]);
-        print("Model %s loaded from previous state!"%modelfiles[-1]);
-        it = int(os.path.splitext(os.path.basename(modelfiles[-1]))[0][5:]) + 1
-    elif(args.initial_model != ""):
-        trainer.loadParameters(args.initial_model);
-        print("Model %s loaded!"%args.initial_model);
-
-    ## If the current iteration is not 1, update the scheduler
-    for ii in range(1,it):
-        trainer.__scheduler__.step()
+    ## mlflow: track this training/evaluation session as a single run, named after the learning rate
+    with mlflow.start_run(run_name=str(args.lr)):
 
-    ## Evaluation code
-    if args.eval == True:
-
-        sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
-        result = tuneThresholdfromScore(sc, lab, [1, 0.1]);
-
-        print('EER %2.4f'%(result[1]))
-        quit();
+        ## Load models
+        s = EmbedNet(**vars(args)).cuda();
 
-    ## Core training script
-    for it in range(it,args.max_epoch+1):
+        it = 1
 
-        clr = [x['lr'] for x in trainer.__optimizer__.param_groups]
+        ## Write args to scorefile
+        scorefile = open(args.result_save_path+"/scores.txt", "a+");
 
-        print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Training epoch %d with LR %f "%(it,max(clr)));
+        strtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+        scorefile.write('%s\n%s\n'%(strtime,args))
+        scorefile.flush()
 
-        loss, traineer = trainer.train_network(trainLoader, verbose=True);
+        ## Input transformations for training
+        train_transform = transforms.Compose(
+            [transforms.ToTensor(),
+             transforms.Resize(256),
+             transforms.RandomCrop([224,224]),
+             transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+        ## Input transformations for evaluation
+        test_transform = transforms.Compose(
+            [transforms.ToTensor(),
+             transforms.Resize(256),
+             transforms.CenterCrop([224,224]),
+             transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+        ## Initialise trainer and data loader
+        trainLoader = get_data_loader(transform=train_transform, **vars(args));
+        trainer = ModelTrainer(s, **vars(args))
+
+        ## Load model weights
+        modelfiles = glob.glob('%s/model0*.model'%args.model_save_path)
+        modelfiles.sort()
+
+        ## If the target directory already exists, start from the existing file
+        if len(modelfiles) >= 1:
+            trainer.loadParameters(modelfiles[-1]);
+            print("Model %s loaded from previous state!"%modelfiles[-1]);
+            it = int(os.path.splitext(os.path.basename(modelfiles[-1]))[0][5:]) + 1
+        elif(args.initial_model != ""):
+            trainer.loadParameters(args.initial_model);
+            print("Model %s loaded!"%args.initial_model);
+
+        ## If the current iteration is not 1, update the scheduler
+        for ii in range(1,it):
+            trainer.__scheduler__.step()
+
+        ## Evaluation code
+        if args.eval == True:
 
-        if it % args.test_interval == 0:
-
             sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
             result = tuneThresholdfromScore(sc, lab, [1, 0.1]);
 
-            print("IT %d, VEER %2.4f"%(it, result[1]));
-            scorefile.write("IT %d, VEER %2.4f\n"%(it, result[1]));
+            print('EER %2.4f'%(result[1]))
+            quit();
 
-            trainer.saveParameters(args.model_save_path+"/model%09d.model"%it);
+        ## Core training script
+        for it in range(it,args.max_epoch+1):
 
-        print(time.strftime("%Y-%m-%d %H:%M:%S"), "TEER/TAcc %2.2f, TLOSS %f"%( traineer, loss));
-        scorefile.write("IT %d, TEER/TAcc %2.2f, TLOSS %f\n"%(it, traineer, loss));
+            clr = [x['lr'] for x in trainer.__optimizer__.param_groups]
 
-        scorefile.flush()
+            print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Training epoch %d with LR %f "%(it,max(clr)));
+
+            loss, traineer = trainer.train_network(trainLoader, verbose=True);
+
+            if it % args.test_interval == 0:
+
+                sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
+                result = tuneThresholdfromScore(sc, lab, [1, 0.1]);
+
+                print("IT %d, VEER %2.4f"%(it, result[1]));
+                scorefile.write("IT %d, VEER %2.4f\n"%(it, result[1]));
+
+                ## mlflow logging: record the validation EER for this epoch
+                mlflow.log_metric("VEER", float(result[1]), step=it)
+
+                trainer.saveParameters(args.model_save_path+"/model%09d.model"%it);
+
+            print(time.strftime("%Y-%m-%d %H:%M:%S"), "TEER/TAcc %2.2f, TLOSS %f"%( traineer, loss));
+            scorefile.write("IT %d, TEER/TAcc %2.2f, TLOSS %f\n"%(it, traineer, loss));
+
+            scorefile.flush()
+
+            ## mlflow logging: per-epoch training metrics and the score file as artifacts
+            mlflow.log_metric("lr", max(clr), step=it)
+            mlflow.log_metric("TEER_TAcc", float(traineer), step=it)
+            mlflow.log_metric("TLOSS", float(loss), step=it)
+            mlflow.log_artifacts(args.result_save_path)
 
-    scorefile.close();
+        scorefile.close();
 
 
 # ## ===== ===== ===== ===== ===== ===== ===== =====
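
Note on the MLflow calls in this patch: unless MLFLOW_TRACKING_URI is set, they write to a local ./mlruns directory under whichever experiment is active. Below is a minimal sketch of how the logged runs could be pointed at a named experiment and queried afterwards; the tracking URI, experiment name, filter threshold and ordering are assumptions for illustration, not part of the patch.

import mlflow

# Assumed configuration -- adjust to your own tracking server and experiment.
mlflow.set_tracking_uri("file:./mlruns")      # local file store; an http(s):// tracking server also works
mlflow.set_experiment("embednet-training")    # hypothetical experiment name

# After some training runs have been logged, the metrics written by the patched
# script (TLOSS, TEER_TAcc, VEER, lr) can be read back as a pandas DataFrame.
runs = mlflow.search_runs(filter_string="metrics.TLOSS < 1.0",
                          order_by=["metrics.VEER ASC"])
print(runs[["run_id", "metrics.TLOSS", "metrics.VEER"]])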