# inference.py

import ast
import glob
import logging
import math
import os
import sys

import editdistance
import numpy as np
import soundfile as sf
import torch
import torch.nn.functional as F

from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter

from ctcdecode import CTCBeamDecoder

logging.basicConfig(level=logging.INFO)
logging.root.setLevel(logging.INFO)
logger = logging.getLogger(__name__)

def add_asr_eval_argument(parser):
    parser.add_argument("--kspmodel", default=None, help="sentence piece model")
    parser.add_argument(
        "--wfstlm", default=None, help="wfstlm on dictionary output units"
    )
    parser.add_argument(
        "--rnnt_decoding_type",
        default="greedy",
        help="rnnt decoding type (e.g. greedy)",
    )
    try:
        parser.add_argument(
            "--lm-weight",
            "--lm_weight",
            type=float,
            default=0.2,
            help="weight for lm while interpolating with neural score",
        )
    except Exception:
        # the argument may already be registered by the generation parser
        pass
    parser.add_argument(
        "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
    )
    parser.add_argument(
        "--w2l-decoder",
        choices=["viterbi", "kenlm", "fairseqlm", "parlance", "online"],
        help="use a w2l decoder",
    )
    parser.add_argument("--lexicon", help="lexicon for w2l decoder")
    parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
    parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
    parser.add_argument("--beam-threshold", type=float, default=25.0)
    parser.add_argument("--beam-size-token", type=float, default=100)
    parser.add_argument("--word-score", type=float, default=1.0)
    parser.add_argument("--unk-weight", type=float, default=-math.inf)
    parser.add_argument("--sil-weight", type=float, default=0.0)
    parser.add_argument(
        "--dump-emissions",
        type=str,
        default=None,
        help="if present, dumps emissions into this file and exits",
    )
    parser.add_argument(
        "--dump-features",
        type=str,
        default=None,
        help="if present, dumps features into this file and exits",
    )
    parser.add_argument(
        "--load-emissions",
        type=str,
        default=None,
        help="if present, loads emissions from this file",
    )
    return parser

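# Example invocation (illustrative; paths are placeholders -- see the sys.argv
# presets near the bottom of this file for the actual configurations used):
#
#   python inference.py /path/to/manifest --task audio_finetuning --nbest 1 \
#       --path /path/to/model.pt --gen-subset test --results-path /path/to/outputs \
#       --w2l-decoder viterbi --criterion ctc --labels ltr \
#       --max-tokens 4000000 --post-process letter
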
def check_args(args):
    # assert args.path is not None, "--path required for generation!"
    # assert args.results_path is not None, "--results_path required for generation!"
    assert (
        not args.sampling or args.nbest == args.beam
    ), "--sampling requires --nbest to be equal to --beam"
    assert (
        args.replace_unk is None or args.raw_text
    ), "--replace-unk requires a raw text dataset (--raw-text)"

def get_dataset_itr(args, task, models):
    return task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.batch_size,
        max_positions=(sys.maxsize, sys.maxsize),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
        data_buffer_size=args.data_buffer_size,
    ).next_epoch_itr(shuffle=False)

def decode(args, hypos, tgt_dict):
    # return the post-processed words of the first (best) hypothesis
    for hypo in hypos[: min(len(hypos), args.nbest)]:
        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        if "words" in hypo:
            hyp_words = " ".join(hypo["words"])
        else:
            hyp_words = post_process(hyp_pieces, args.post_process)
        return hyp_words

def process_predictions(
    args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
    for hypo in hypos[: min(len(hypos), args.nbest)]:
        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        if "words" in hypo:
            hyp_words = " ".join(hypo["words"])
        else:
            hyp_words = post_process(hyp_pieces, args.post_process)

        if res_files is not None:
            print(
                "{} ({}-{})".format(hyp_pieces, speaker, id),
                file=res_files["hypo.units"],
            )
            print(
                "{} ({}-{})".format(hyp_words, speaker, id),
                file=res_files["hypo.words"],
            )

        tgt_pieces = tgt_dict.string(target_tokens)
        tgt_words = post_process(tgt_pieces, args.post_process)

        if res_files is not None:
            print(
                "{} ({}-{})".format(tgt_pieces, speaker, id),
                file=res_files["ref.units"],
            )
            print(
                "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
            )

        if not args.quiet:
            logger.info("HYPO:" + hyp_words)
            logger.info("TARGET:" + tgt_words)
            logger.info("___________________")

        hyp_words = hyp_words.split()
        tgt_words = tgt_words.split()
        return editdistance.eval(hyp_words, tgt_words), len(tgt_words)

def prepare_result_files(args):
    def get_res_file(file_prefix):
        if args.num_shards > 1:
            file_prefix = f"{args.shard_id}_{file_prefix}"
        path = os.path.join(
            args.results_path,
            "{}-{}-{}.txt".format(
                file_prefix, os.path.basename(args.path), args.gen_subset
            ),
        )
        return open(path, "w", buffering=1)

    if not args.results_path:
        return None

    return {
        "hypo.words": get_res_file("hypo.word"),
        "hypo.units": get_res_file("hypo.units"),
        "ref.words": get_res_file("ref.word"),
        "ref.units": get_res_file("ref.units"),
    }

def optimize_models(args, use_cuda, models):
    """Optimize ensemble for generation"""
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()

def apply_half(t):
    if t.dtype is torch.float32:
        return t.to(dtype=torch.half)
    return t

def get_feature_to_path(filepath):
    wav, sample_rate = sf.read(filepath)
    feats = torch.from_numpy(wav).float()
    feats = feature_postprocess(feats)
    return feats


def get_feature(wav):
    audio = np.array(wav).squeeze()
    feats = torch.from_numpy(audio).float()
    feats = feature_postprocess(feats)
    return feats


def feature_postprocess(feats):
    if feats.dim() == 2:
        # multi-channel audio: average the channels into a mono signal
        feats = feats.mean(-1)
    assert feats.dim() == 1, feats.dim()
    with torch.no_grad():
        feats = F.layer_norm(feats, feats.shape)
    return feats


def convert_to_string(tokens, vocab, seq_len):
    return "".join([vocab[x] for x in tokens[0:seq_len]])

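# convert_to_string() maps CTCBeamDecoder output ids back to label strings. A
# minimal sketch (illustrative; `probs` is assumed to be softmax probabilities of
# shape [batch, time, vocab], as produced in the test code inside main()):
#
#   beam_results, beam_scores, timesteps, out_lens = decoder.decode(probs)
#   text = convert_to_string(beam_results[0][0], labels, out_lens[0][0]).replace("|", " ")
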
def main(args, task=None, model_state=None):
    check_args(args)

    use_fp16 = args.fp16
    if args.max_tokens is None and args.batch_size is None:
        args.max_tokens = 4000000
    logger.info(args)

    use_cuda = torch.cuda.is_available() and not args.cpu

    logger.info("| decoding with criterion {}".format(args.criterion))

    task = tasks.setup_task(args)

    # Load ensemble
    if args.load_emissions:
        models, criterions = [], []
        task.load_dataset(args.gen_subset)
    else:
        logger.info("| loading model(s) from {}".format(args.path))
        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
            utils.split_paths(args.path, separator="\\"),
            arg_overrides=ast.literal_eval(args.model_overrides),
            task=task,
            suffix=args.checkpoint_suffix,
            strict=(args.checkpoint_shard_count == 1),
            num_shards=args.checkpoint_shard_count,
            state=model_state,
        )
        optimize_models(args, use_cuda, models)
        task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)

    # Set dictionary
    tgt_dict = task.target_dictionary
    logger.info(f"| | tgt_dict = {tgt_dict.indices}")
    labels = [k for k in tgt_dict.indices.keys()]
    # for k, v in tgt_dict.indices.items():
    #     labels.append()

    logger.info(
        "| {} {} {} examples".format(
            args.data, args.gen_subset, len(task.dataset(args.gen_subset))
        )
    )

    # hack to pass transitions to W2lDecoder
    if args.criterion == "asg_loss":
        raise NotImplementedError("asg_loss is currently not supported")
        # trans = criterions[0].asg.trans.data
        # args.asg_transitions = torch.flatten(trans).tolist()

    # Load dataset (possibly sharded)
    itr = get_dataset_itr(args, task, models)

    # Initialize generator
    # gen_timer = StopwatchMeter()
    def build_generator(args):
        w2l_decoder = getattr(args, "w2l_decoder", None)
        if w2l_decoder == "viterbi":
            from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder

            return W2lViterbiDecoder(args, task.target_dictionary)
        elif w2l_decoder == "kenlm":
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder

            return W2lKenLMDecoder(args, task.target_dictionary)
        elif w2l_decoder == "fairseqlm":
            from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder

            return W2lFairseqLMDecoder(args, task.target_dictionary)
        else:
            print(
                "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
            )

    # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
    generator = build_generator(args)
    if args.load_emissions:
        # NOTE: ExistingEmissionsDecoder is neither defined nor imported in this
        # file, so the --load-emissions path would fail here.
        generator = ExistingEmissionsDecoder(
            generator, np.load(args.load_emissions, allow_pickle=True)
        )
        logger.info("loaded emissions from " + args.load_emissions)

    num_sentences = 0

    if args.results_path is not None and not os.path.exists(args.results_path):
        os.makedirs(args.results_path)

    max_source_pos = (
        utils.resolve_max_positions(
            task.max_positions(), *[model.max_positions() for model in models]
        ),
    )

    if max_source_pos is not None:
        max_source_pos = max_source_pos[0]
        if max_source_pos is not None:
            max_source_pos = max_source_pos[0] - 1

    if args.dump_emissions:
        emissions = {}
    if args.dump_features:
        features = {}
        models[0].bert.proj = None
    else:
        res_files = prepare_result_files(args)
    errs_t = 0
    lengths_t = 0
    ################ test code ################
    _sample = dict()
    _net_input = dict()
    _cuda = "cpu"

    feature = get_feature_to_path(
        "/root/mnt/data/kspon_ori/ogg/fork/KsponSpeech_01/KsponSpeech_0001/KsponSpeech_000002.ogg"
    )
    _net_input["source"] = feature.unsqueeze(0).to(_cuda)
    padding_mask = (
        torch.BoolTensor(_net_input["source"].size(1))
        .fill_(False)
        .unsqueeze(0)
        .to(_cuda)
    )
    _net_input["padding_mask"] = padding_mask
    _sample["net_input"] = _net_input

    ## model cuda change
    models[0].to(_cuda)

    decoder = CTCBeamDecoder(
        labels,
        model_path=None,
        alpha=0,
        beta=0,
        cutoff_top_n=40,
        cutoff_prob=1.0,
        beam_width=100,
        num_processes=4,
        blank_id=0,
        log_probs_input=False,
    )

    with torch.no_grad():
        encoder_input = {
            k: v for k, v in _sample["net_input"].items() if k != "prev_output_tokens"
        }
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = (
            model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        )
        logger.info(f"| | emissions = {emissions}, {emissions.shape}")
        # softmax over the vocabulary axis so CTCBeamDecoder receives probabilities
        # (matching log_probs_input=False above)
        sft = torch.nn.functional.softmax(emissions, dim=2)
        logger.info(f"| | sft = {sft.shape}, {sft[0][0].sum()}")
        beam_results, beam_scores, timesteps, out_lens = decoder.decode(sft)
        output_str = convert_to_string(
            beam_results[0][0], labels, out_lens[0][0]
        ).replace("|", " ")

        tmp = [{"tokens": beam_results[0][0][: out_lens[0][0]]}]
        output_str2 = decode(args, tmp, tgt_dict)

        prefix_tokens = None
        hypos = task.inference_step(generator, models, _sample, prefix_tokens)
        logger.info(f"| | hypos = {hypos}")
        hypos = decode(args, hypos[0], tgt_dict)
        logger.info(f"| | hypos _ decoding = {hypos}")
        # beam_results, beam_scores, timesteps, out_lens = decoder.decode(torch.tensor(hypos))
    ################ test code ################
    with progress_bar.build_progress_bar(args, itr) as t:
        wps_meter = TimeMeter()
        # logger.info(f"| | in progress_bar = {t}")
        for sample in t:
            logger.info(f"| | in progress_bar | sample = {sample}")
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            if use_fp16:
                sample = utils.apply_to_sample(apply_half, sample)
            if "net_input" not in sample:
                continue

            prefix_tokens = None
            if args.prefix_size > 0:
                prefix_tokens = sample["target"][:, : args.prefix_size]

            # gen_timer.start()
            # if args.dump_emissions:
            #     with torch.no_grad():
            #         encoder_out = models[0](**sample["net_input"])
            #         emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
            #         emm = emm.transpose(0, 1).cpu().numpy()
            #         for i, id in enumerate(sample["id"]):
            #             emissions[id.item()] = emm[i]
            #         continue
            # elif args.dump_features:
            #     with torch.no_grad():
            #         encoder_out = models[0](**sample["net_input"])
            #         feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
            #         for i, id in enumerate(sample["id"]):
            #             padding = (
            #                 encoder_out["encoder_padding_mask"][i].cpu().numpy()
            #                 if encoder_out["encoder_padding_mask"] is not None
            #                 else None
            #             )
            #             features[id.item()] = (feat[i], padding)
            #         continue
            hypos = task.inference_step(generator, models, sample, prefix_tokens)
            print("hypos = ", hypos)
            print(f"tgt_dict = {tgt_dict}")
            # NOTE: this exit() stops the run after the first batch
            exit()
            num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)

            for i, sample_id in enumerate(sample["id"].tolist()):
                speaker = None
                # id = task.dataset(args.gen_subset).ids[int(sample_id)]
                id = sample_id
                toks = (
                    sample["target"][i, :]
                    if "target_label" not in sample
                    else sample["target_label"][i, :]
                )
                target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
                # Process top predictions
                hypos = process_predictions(
                    args,
                    hypos[i],
                    None,
                    tgt_dict,
                    target_tokens,
                    res_files,
                    speaker,
                    id,
                )

            wps_meter.update(num_generated_tokens)
            t.log({"wps": round(wps_meter.avg)})
            num_sentences += (
                sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
            )
    wer = None
    if args.dump_emissions:
        emm_arr = []
        for i in range(len(emissions)):
            emm_arr.append(emissions[i])
        np.save(args.dump_emissions, emm_arr)
        logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
    elif args.dump_features:
        feat_arr = []
        for i in range(len(features)):
            feat_arr.append(features[i])
        np.save(args.dump_features, feat_arr)
        logger.info(f"saved {len(features)} features to {args.dump_features}")
    else:
        if lengths_t > 0:
            wer = errs_t * 100.0 / lengths_t
            logger.info(f"WER: {wer}")
        # logger.info(
        #     "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
        #     "sentences/s, {:.2f} tokens/s)".format(
        #         num_sentences,
        #         gen_timer.n,
        #         gen_timer.sum,
        #         num_sentences / gen_timer.sum,
        #         1.0 / gen_timer.avg,
        #     )
        # )
        # logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
    return task, wer

def make_parser():
    parser = options.get_generation_parser()
    parser = add_asr_eval_argument(parser)
    return parser


def cli_main():
    parser = make_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
    exit()

# Module-level variant of build_generator used by the script code below; it relies
# on the global `task` created further down.
def build_generator(args):
    w2l_decoder = getattr(args, "w2l_decoder", None)
    if w2l_decoder == "viterbi":
        from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder

        return W2lViterbiDecoder(args, task.target_dictionary)
    elif w2l_decoder == "kenlm":
        from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder

        return W2lKenLMDecoder(args, task.target_dictionary)
    elif w2l_decoder == "fairseqlm":
        from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder

        return W2lFairseqLMDecoder(args, task.target_dictionary)
    elif w2l_decoder == "parlance":
        from decoder.w2l_parlance import W2lParlance

        return W2lParlance(args, task.target_dictionary)
    elif w2l_decoder == "online":
        from decoder.w2l_parlance import W2lParlanceOnlineDecoder

        return W2lParlanceOnlineDecoder(args, task.target_dictionary)
    else:
        print(
            "only (viterbi, kenlm, fairseqlm, parlance, online) decoders are supported at the moment"
        )

# if __name__ == "__main__":
#     cli_main()
#     exit()

## for parlance online decoder test
# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
#             '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt',
#             '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
#             '--lm-model', '/root/project/speech_server/decoder/kakao3.bin', '--lm-weight', '2', '--word-score', '-1',
#             '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
#             '--max-tokens', '4000000', '--post-process', 'letter', '--cpu']

# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
#             '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt',
#             '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
#             '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '5', '--word-score', '-1',
#             '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
#             '--max-tokens', '4000000', '--post-process', 'letter']

# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
#             '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/estate_cer20.pt',
#             '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'viterbi',
#             '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '5', '--word-score', '-1',
#             '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
#             '--max-tokens', '4000000', '--post-process', 'letter']

# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
#             '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt',
#             '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
#             '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '0', '--word-score', '-1',
#             '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
#             '--max-tokens', '4000000', '--post-process', 'letter']

## no lm
sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
            '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/estate_cer20.pt',
            '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
            '--word-score', '-1',
            '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
            '--max-tokens', '4000000', '--post-process', 'letter']

parser = make_parser()
args = options.parse_args_and_arch(parser)

use_fp16 = args.fp16
if args.max_tokens is None and args.batch_size is None:
    args.max_tokens = 4000000
logger.info(args)

use_cuda = torch.cuda.is_available() and not args.cpu
use_cuda_str = 'cuda' if use_cuda else 'cpu'

logger.info("| decoding with criterion {}".format(args.criterion))

task = tasks.setup_task(args)

logger.info("| loading model(s) from {}".format(args.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    utils.split_paths(args.path, separator="\\"),
    arg_overrides=ast.literal_eval(args.model_overrides),
    task=task,
    suffix=args.checkpoint_suffix,
    strict=(args.checkpoint_shard_count == 1),
    num_shards=args.checkpoint_shard_count,
    state=None,
)

## optimize
optimize_models(args, use_cuda, models)
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)

# Set dictionary
tgt_dict = task.target_dictionary
# labels = [k for k in tgt_dict.indices.keys()]

logger.info(
    "| {} {} {} examples".format(
        args.data, args.gen_subset, len(task.dataset(args.gen_subset))
    )
)

generator = build_generator(args)

def list_chunk(lst, n):
    return [lst[i:i + n] for i in range(0, len(lst), n)]

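# list_chunk() is used below to split a long waveform into fixed-length windows for
# the online decoder. A minimal illustration (assumes 16 kHz audio, so a 3-second
# window is 48000 samples; the tensor here is dummy data):
#
#   dummy = torch.zeros(100000)
#   chunks = list_chunk(dummy, 16000 * 3)
#   # -> three chunks of 48000, 48000, and 4000 samples
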
def inference_online(audio):
    sample = dict()
    net_input = dict()
    feature = get_feature(audio)
    hypos_list = []
    sec = 3
    feature_list = list_chunk(feature, 16000 * sec)
    for idx, _feat in enumerate(feature_list):
        net_input["source"] = _feat.unsqueeze(0).to(use_cuda_str)
        padding_mask = (
            torch.BoolTensor(net_input["source"].size(1))
            .fill_(False)
            .unsqueeze(0)
            .to(use_cuda_str)
        )
        net_input["padding_mask"] = padding_mask
        sample["net_input"] = net_input
        models[0].to(use_cuda_str)
        # hypos = task.inference_step(generator, models, sample, None)
        eos = False
        if idx == len(feature_list) - 1:
            eos = True
        with torch.no_grad():
            hypos = generator.generate(models, sample, eos=eos, prefix_tokens=None, constraints=None)
        hypos_list.append(hypos)
    logger.info(f"| | hypos_list => {hypos_list}")
    return hypos_list[-1]

def inference(audio):
    sample = dict()
    net_input = dict()
    feature = get_feature(audio)
    net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
    logger.info(f'feature shape = {net_input["source"].shape}')
    padding_mask = (
        torch.BoolTensor(net_input["source"].size(1))
        .fill_(False)
        .unsqueeze(0)
        .to(use_cuda_str)
    )
    net_input["padding_mask"] = padding_mask
    sample["net_input"] = net_input
    models[0].to(use_cuda_str)
    hypos = task.inference_step(generator, models, sample, None)
    if args.w2l_decoder == 'viterbi':
        for hypo in hypos[: min(len(hypos), args.nbest)]:
            hyp_pieces = tgt_dict.string(hypo[0]["tokens"].int().cpu())
            if "words" in hypo:
                hyp_words = " ".join(hypo["words"])
            else:
                hyp_words = post_process(hyp_pieces, args.post_process)
            hypos = hyp_words
    logger.info(f"| | hypos => {hypos}")
    return hypos

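# Example call (illustrative; assumes `audio` is a 1-D 16 kHz waveform as a list or
# numpy array, e.g. decoded from a client request):
#
#   text = inference(audio)
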
def inference_file(file_list):
    sample = dict()
    net_input = dict()
    for _file in file_list:
        feature = get_feature_to_path(_file)
        net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
        padding_mask = (
            torch.BoolTensor(net_input["source"].size(1))
            .fill_(False)
            .unsqueeze(0)
            .to(use_cuda_str)
        )
        net_input["padding_mask"] = padding_mask
        sample["net_input"] = net_input
        models[0].to(use_cuda_str)
        logger.info(f'feature = {sample["net_input"]["source"].shape}')
        hypos = task.inference_step(generator, models, sample, None)
        if args.w2l_decoder == 'viterbi':
            for hypo in hypos[: min(len(hypos), args.nbest)]:
                hyp_pieces = tgt_dict.string(hypo[0]["tokens"].int().cpu())
                if "words" in hypo:
                    hyp_words = " ".join(hypo["words"])
                else:
                    hyp_words = post_process(hyp_pieces, args.post_process)
                hypos = hyp_words
        logger.info(f"| | file => {_file}")
        logger.info(f"| | hypos => {hypos}")
    return ''

# Leftover single-file smoke test (the wrapper function name below is added here,
# since the snippet cannot return at module level; it is not called anywhere).
def _test_single_file():
    sample = dict()
    net_input = dict()
    feature = get_feature_to_path(
        '/root/nas/data/estate_114/ogg/wav/202001/02/93687_5009_5009_99024777300_20200102103742.ogg'
    )
    net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
    padding_mask = (
        torch.BoolTensor(net_input["source"].size(1))
        .fill_(False)
        .unsqueeze(0)
        .to(use_cuda_str)
    )
    net_input["padding_mask"] = padding_mask
    sample["net_input"] = net_input
    models[0].to(use_cuda_str)
    hypos = task.inference_step(generator, models, sample, None)
    logger.info(f"| | hypos => {hypos}")
    return hypos