#!/usr/bin/python
#-*- coding: utf-8 -*-

import sys, time, os, argparse, socket
import yaml
import pdb
import glob
import datetime
from utils import *
from EmbedNet import *
from DatasetLoader import get_data_loader
import torchvision.transforms as transforms

## ===== ===== ===== ===== ===== ===== ===== =====
## Parse arguments
## ===== ===== ===== ===== ===== ===== ===== =====

parser = argparse.ArgumentParser(description = "FaceNet");

parser.add_argument('--config', type=str, default=None, help='Config YAML file');

## Data loader
parser.add_argument('--batch_size', type=int, default=200, help='Batch size, number of classes per batch');
parser.add_argument('--max_img_per_cls', type=int, default=500, help='Maximum number of images per class per epoch');
parser.add_argument('--nDataLoaderThread', type=int, default=5, help='Number of loader threads');

## Training details
parser.add_argument('--test_interval', type=int, default=5, help='Test and save every [test_interval] epochs');
parser.add_argument('--max_epoch', type=int, default=100, help='Maximum number of epochs');
parser.add_argument('--trainfunc', type=str, default="softmax", help='Loss function');

## Optimizer
parser.add_argument('--optimizer', type=str, default="adam", help='sgd or adam');
parser.add_argument('--scheduler', type=str, default="steplr", help='Learning rate scheduler');
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate');
parser.add_argument("--lr_decay", type=float, default=0.90, help='Learning rate decay every [test_interval] epochs');
parser.add_argument('--weight_decay', type=float, default=0, help='Weight decay in the optimizer');

## Loss functions
parser.add_argument("--hard_prob", type=float, default=0.5, help='Hard negative mining probability, otherwise random, only for some loss functions');
parser.add_argument("--hard_rank", type=int, default=10, help='Hard negative mining rank in the batch, only for some loss functions');
parser.add_argument('--margin', type=float, default=0.1, help='Loss margin, only for some loss functions');
parser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions');
parser.add_argument('--nPerClass', type=int, default=1, help='Number of images per class per batch, only for metric learning based losses');
parser.add_argument('--nClasses', type=int, default=8700, help='Number of classes in the softmax layer, only for softmax-based losses');

## Load and save
parser.add_argument('--initial_model', type=str, default="./models/amsoft_model.model", help='Initial model weights');
parser.add_argument('--save_path', type=str, default="exps/exp1", help='Path for model and logs');

## Training and test data
parser.add_argument('--train_path', type=str, default="data/vggface2", help='Absolute path to the train set');
parser.add_argument('--train_ext', type=str, default="jpg", help='Training files extension');
parser.add_argument('--test_path', type=str, default="data/test", help='Absolute path to the test set');
parser.add_argument('--test_list', type=str, default="data/test_list.csv", help='Evaluation list');

## Model definition
parser.add_argument('--model', type=str, default="ResNet18", help='Name of model definition');
parser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer');

## For test only
parser.add_argument('--eval', dest='eval', action='store_true', help='Eval only')

## Distributed and mixed precision training
parser.add_argument('--mixedprec', dest='mixedprec', action='store_true', help='Enable mixed precision training')

args = parser.parse_args();
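
## Example invocation (illustrative values; every flag used here is defined above):
##   python trainEmbedNet.py --save_path exps/exp1 --model ResNet18 --trainfunc softmax --batch_size 200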

## Parse YAML
def find_option_type(key, parser):
    for opt in parser._get_optional_actions():
        if ('--' + key) in opt.option_strings:
            return opt.type
    raise ValueError

if args.config is not None:
    with open(args.config, "r") as f:
        yml_config = yaml.load(f, Loader=yaml.FullLoader)
    for k, v in yml_config.items():
        if k in args.__dict__:
            typ = find_option_type(k, parser)
            args.__dict__[k] = typ(v)
        else:
            sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
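
## Example config YAML (illustrative): keys must match the argparse options
## above, and values are cast to each option's declared type, e.g.
##   batch_size: 100
##   lr: 0.0005
##   trainfunc: softmax
## Launch with: python trainEmbedNet.py --config configs/example.yaml  (hypothetical path)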

## ===== ===== ===== ===== ===== ===== ===== =====
## Trainer script
## ===== ===== ===== ===== ===== ===== ===== =====

def main_worker(args):

    ## Load models
    s = EmbedNet(**vars(args)).cuda();

    it = 1

    ## Write args to scorefile
    scorefile = open(args.result_save_path+"/scores.txt", "a+");

    strtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    scorefile.write('%s\n%s\n'%(strtime,args))
    scorefile.flush()

    ## Input transformations for training
    train_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Resize(256),
         transforms.RandomCrop([224,224]),
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])

    ## Input transformations for evaluation
    test_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Resize(256),
         transforms.CenterCrop([224,224]),
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
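
    ## Both pipelines resize to 256 and crop to 224x224; training uses a random
    ## crop for augmentation, evaluation a deterministic centre crop. The
    ## normalisation constants are the standard ImageNet mean/std.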

    ## Initialise trainer and data loader
    trainLoader = get_data_loader(transform=train_transform, **vars(args));
    trainer = ModelTrainer(s, **vars(args))

    ## Load model weights
    modelfiles = glob.glob('%s/model0*.model'%args.model_save_path)
    modelfiles.sort()

    ## If the target directory already exists, start from the existing file
    if len(modelfiles) >= 1:
        trainer.loadParameters(modelfiles[-1]);
        print("Model %s loaded from previous state!"%modelfiles[-1]);
        it = int(os.path.splitext(os.path.basename(modelfiles[-1]))[0][5:]) + 1
    elif(args.initial_model != ""):
        trainer.loadParameters(args.initial_model);
        print("Model %s loaded!"%args.initial_model);

    ## If the current iteration is not 1, update the scheduler
    for ii in range(1,it):
        trainer.__scheduler__.step()
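
    ## Example (illustrative): evaluate a saved model without training, using
    ## the defaults defined above:
    ##   python trainEmbedNet.py --eval --initial_model ./models/amsoft_model.model --test_list data/test_list.csv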

    ## Evaluation code
    if args.eval == True:

        sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
        result = tuneThresholdfromScore(sc, lab, [1, 0.1]);

        print('EER %2.4f'%(result[1]))

        quit();

    ## Core training script
    for it in range(it,args.max_epoch+1):

        clr = [x['lr'] for x in trainer.__optimizer__.param_groups]

        print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Training epoch %d with LR %f "%(it,max(clr)));

        loss, traineer = trainer.train_network(trainLoader, verbose=True);

        if it % args.test_interval == 0:

            sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
            result = tuneThresholdfromScore(sc, lab, [1, 0.1]);

            print("IT %d, VEER %2.4f"%(it, result[1]));
            scorefile.write("IT %d, VEER %2.4f\n"%(it, result[1]));

            trainer.saveParameters(args.model_save_path+"/model%09d.model"%it);
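
            ## The zero-padded checkpoint name matches the 'model0*.model' glob
            ## used above, so interrupted runs resume from the latest epoch.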

        print(time.strftime("%Y-%m-%d %H:%M:%S"), "TEER/TAcc %2.2f, TLOSS %f"%( traineer, loss));
        scorefile.write("IT %d, TEER/TAcc %2.2f, TLOSS %f\n"%(it, traineer, loss));

        scorefile.flush()

    scorefile.close();

## ===== ===== ===== ===== ===== ===== ===== =====
## Main function
## ===== ===== ===== ===== ===== ===== ===== =====

def main():

    args.model_save_path = args.save_path+"/model"
    args.result_save_path = args.save_path+"/result"
    args.feat_save_path = ""

    if not(os.path.exists(args.model_save_path)):
        os.makedirs(args.model_save_path)

    if not(os.path.exists(args.result_save_path)):
        os.makedirs(args.result_save_path)

    main_worker(args)


if __name__ == '__main__':
    main()