# inference.py
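# fairseq CTC speech recognition inference (KsponSpeech):
# - offline evaluation via cli_main()/main()
# - module-level test/server code that loads a checkpoint at import time and exposes
#   inference(), inference_online(), and inference_file() helpers using flashlight-style
#   (w2l), parlance, and ctcdecode decoders.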
import ast
import logging
import math
import os
import sys

import editdistance
import numpy as np
import soundfile as sf
import torch
import torch.nn.functional as F
from ctcdecode import CTCBeamDecoder
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.logging.meters import StopwatchMeter, TimeMeter

logging.basicConfig()
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def add_asr_eval_argument(parser):
    parser.add_argument("--kspmodel", default=None, help="sentence piece model")
    parser.add_argument(
        "--wfstlm", default=None, help="wfstlm on dictionary output units"
    )
    parser.add_argument(
        "--rnnt_decoding_type",
        default="greedy",
        help="rnnt decoding type (greedy by default)",
    )
    try:
        parser.add_argument(
            "--lm-weight",
            "--lm_weight",
            type=float,
            default=0.2,
            help="weight for lm while interpolating with neural score",
        )
    except Exception:
        # --lm-weight may already be registered by the generation parser
        pass
    parser.add_argument(
        "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
    )
    parser.add_argument(
        "--w2l-decoder",
        choices=["viterbi", "kenlm", "fairseqlm", "parlance", "online"],
        help="use a w2l decoder",
    )
    parser.add_argument("--lexicon", help="lexicon for w2l decoder")
    parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
    parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
    parser.add_argument("--beam-threshold", type=float, default=25.0)
    parser.add_argument("--beam-size-token", type=float, default=100)
    parser.add_argument("--word-score", type=float, default=1.0)
    parser.add_argument("--unk-weight", type=float, default=-math.inf)
    parser.add_argument("--sil-weight", type=float, default=0.0)
    parser.add_argument(
        "--dump-emissions",
        type=str,
        default=None,
        help="if present, dumps emissions into this file and exits",
    )
    parser.add_argument(
        "--dump-features",
        type=str,
        default=None,
        help="if present, dumps features into this file and exits",
    )
    parser.add_argument(
        "--load-emissions",
        type=str,
        default=None,
        help="if present, loads emissions from this file",
    )
    return parser

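# Basic sanity checks on the decoding options before generation.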
def check_args(args):
    # assert args.path is not None, "--path required for generation!"
    # assert args.results_path is not None, "--results_path required for generation!"
    assert (
        not args.sampling or args.nbest == args.beam
    ), "--sampling requires --nbest to be equal to --beam"
    assert (
        args.replace_unk is None or args.raw_text
    ), "--replace-unk requires a raw text dataset (--raw-text)"

def get_dataset_itr(args, task, models):
    return task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.batch_size,
        max_positions=(sys.maxsize, sys.maxsize),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
        data_buffer_size=args.data_buffer_size,
    ).next_epoch_itr(shuffle=False)

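# Turn a list of hypotheses for one utterance into a post-processed word string.
# Only the last of the first args.nbest hypotheses is returned (used here with nbest=1).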
def decode(args, hypos, tgt_dict):
    for hypo in hypos[: min(len(hypos), args.nbest)]:
        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        if "words" in hypo:
            hyp_words = " ".join(hypo["words"])
        else:
            hyp_words = post_process(hyp_pieces, args.post_process)
    return hyp_words

def process_predictions(
    args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
):
    for hypo in hypos[: min(len(hypos), args.nbest)]:
        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        if "words" in hypo:
            hyp_words = " ".join(hypo["words"])
        else:
            hyp_words = post_process(hyp_pieces, args.post_process)
        if res_files is not None:
            print(
                "{} ({}-{})".format(hyp_pieces, speaker, id),
                file=res_files["hypo.units"],
            )
            print(
                "{} ({}-{})".format(hyp_words, speaker, id),
                file=res_files["hypo.words"],
            )
        tgt_pieces = tgt_dict.string(target_tokens)
        tgt_words = post_process(tgt_pieces, args.post_process)
        if res_files is not None:
            print(
                "{} ({}-{})".format(tgt_pieces, speaker, id),
                file=res_files["ref.units"],
            )
            print(
                "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
            )
        if not args.quiet:
            logger.info("HYPO:" + hyp_words)
            logger.info("TARGET:" + tgt_words)
            logger.info("___________________")
        hyp_words = hyp_words.split()
        tgt_words = tgt_words.split()
        return editdistance.eval(hyp_words, tgt_words), len(tgt_words)

def prepare_result_files(args):
    def get_res_file(file_prefix):
        if args.num_shards > 1:
            file_prefix = f"{args.shard_id}_{file_prefix}"
        path = os.path.join(
            args.results_path,
            "{}-{}-{}.txt".format(
                file_prefix, os.path.basename(args.path), args.gen_subset
            ),
        )
        return open(path, "w", buffering=1)

    if not args.results_path:
        return None
    return {
        "hypo.words": get_res_file("hypo.word"),
        "hypo.units": get_res_file("hypo.units"),
        "ref.words": get_res_file("ref.word"),
        "ref.units": get_res_file("ref.units"),
    }

def optimize_models(args, use_cuda, models):
    """Optimize ensemble for generation"""
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()


def apply_half(t):
    if t.dtype is torch.float32:
        return t.to(dtype=torch.half)
    return t

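# Audio feature helpers: load or convert a raw waveform to a 1-D float tensor and layer-normalize it.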
def get_feature_to_path(filepath):
    wav, sample_rate = sf.read(filepath)
    feats = torch.from_numpy(wav).float()
    feats = feature_postprocess(feats)
    return feats


def get_feature(wav):
    audio = np.array(wav).squeeze()
    feats = torch.from_numpy(audio).float()
    feats = feature_postprocess(feats)
    return feats


def feature_postprocess(feats):
    if feats.dim() == 2:  # stereo -> mono
        feats = feats.mean(-1)
    assert feats.dim() == 1, feats.dim()
    with torch.no_grad():
        feats = F.layer_norm(feats, feats.shape)
    return feats


def convert_to_string(tokens, vocab, seq_len):
    return "".join([vocab[x] for x in tokens[0:seq_len]])

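# Offline evaluation entry point: loads the model ensemble, builds a decoder, and
# computes WER over the chosen subset (or dumps emissions/features).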
def main(args, task=None, model_state=None):
    check_args(args)

    use_fp16 = args.fp16
    if args.max_tokens is None and args.batch_size is None:
        args.max_tokens = 4000000
    logger.info(args)

    use_cuda = torch.cuda.is_available() and not args.cpu
    logger.info("| decoding with criterion {}".format(args.criterion))

    task = tasks.setup_task(args)

    # Load ensemble
    if args.load_emissions:
        models, criterions = [], []
        task.load_dataset(args.gen_subset)
    else:
        logger.info("| loading model(s) from {}".format(args.path))
        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
            utils.split_paths(args.path, separator="\\"),
            arg_overrides=ast.literal_eval(args.model_overrides),
            task=task,
            suffix=args.checkpoint_suffix,
            strict=(args.checkpoint_shard_count == 1),
            num_shards=args.checkpoint_shard_count,
            state=model_state,
        )
        optimize_models(args, use_cuda, models)
        task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)

    # Set dictionary
    tgt_dict = task.target_dictionary
    logger.info(f"| | tgt_dict = {tgt_dict.indices}")
    labels = [k for k in tgt_dict.indices.keys()]
    # for k, v in tgt_dict.indices.items():
    #     labels.append()

    logger.info(
        "| {} {} {} examples".format(
            args.data, args.gen_subset, len(task.dataset(args.gen_subset))
        )
    )

    # hack to pass transitions to W2lDecoder
    if args.criterion == "asg_loss":
        raise NotImplementedError("asg_loss is currently not supported")
        # trans = criterions[0].asg.trans.data
        # args.asg_transitions = torch.flatten(trans).tolist()

    # Load dataset (possibly sharded)
    itr = get_dataset_itr(args, task, models)

    # Initialize generator
    # gen_timer = StopwatchMeter()

    def build_generator(args):
        w2l_decoder = getattr(args, "w2l_decoder", None)
        if w2l_decoder == "viterbi":
            from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder

            return W2lViterbiDecoder(args, task.target_dictionary)
        elif w2l_decoder == "kenlm":
            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder

            return W2lKenLMDecoder(args, task.target_dictionary)
        elif w2l_decoder == "fairseqlm":
            from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder

            return W2lFairseqLMDecoder(args, task.target_dictionary)
        else:
            print(
                "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
            )

    # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
    generator = build_generator(args)

    if args.load_emissions:
        # NOTE: ExistingEmissionsDecoder is not defined or imported in this file
        # (upstream it lives in fairseq's examples/speech_recognition/infer.py).
        generator = ExistingEmissionsDecoder(
            generator, np.load(args.load_emissions, allow_pickle=True)
        )
        logger.info("loaded emissions from " + args.load_emissions)

    num_sentences = 0

    if args.results_path is not None and not os.path.exists(args.results_path):
        os.makedirs(args.results_path)

    max_source_pos = (
        utils.resolve_max_positions(
            task.max_positions(), *[model.max_positions() for model in models]
        ),
    )
    if max_source_pos is not None:
        max_source_pos = max_source_pos[0]
        if max_source_pos is not None:
            max_source_pos = max_source_pos[0] - 1

    if args.dump_emissions:
        emissions = {}
    if args.dump_features:
        features = {}
        models[0].bert.proj = None
    else:
        res_files = prepare_result_files(args)
    errs_t = 0
    lengths_t = 0

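    # One-off smoke test: run a single hardcoded KsponSpeech file through the model on CPU
    # and compare ctcdecode (CTCBeamDecoder) output against the fairseq w2l decoder output.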
    ################ test code ################
    _sample = dict()
    _net_input = dict()
    _cuda = 'cpu'
    feature = get_feature_to_path("/root/mnt/data/kspon_ori/ogg/fork/KsponSpeech_01/KsponSpeech_0001/KsponSpeech_000002.ogg")
    _net_input["source"] = feature.unsqueeze(0).to(_cuda)
    padding_mask = torch.BoolTensor(_net_input["source"].size(1)).fill_(False).unsqueeze(0).to(_cuda)
    _net_input["padding_mask"] = padding_mask
    _sample["net_input"] = _net_input

    ## model cuda change
    models[0].to(_cuda)

    decoder = CTCBeamDecoder(
        labels,
        model_path=None,
        alpha=0,
        beta=0,
        cutoff_top_n=40,
        cutoff_prob=1.0,
        beam_width=100,
        num_processes=4,
        blank_id=0,
        log_probs_input=False,
    )

    with torch.no_grad():
        encoder_input = {
            k: v for k, v in _sample["net_input"].items() if k != "prev_output_tokens"
        }
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        logger.info(f"| | emissions = {emissions}, {emissions.shape}")
        sft = torch.nn.functional.softmax(emissions, dim=2)
        logger.info(f"| | sft = {sft.shape}, {sft[0][0].sum()}")
        beam_results, beam_scores, timesteps, out_lens = decoder.decode(sft)
        output_str = convert_to_string(beam_results[0][0], labels, out_lens[0][0]).replace("|", " ")
        tmp = [{'tokens': beam_results[0][0][:out_lens[0][0]]}]
        output_str2 = decode(args, tmp, tgt_dict)

    prefix_tokens = None
    hypos = task.inference_step(generator, models, _sample, prefix_tokens)
    logger.info(f"| | hypos = {hypos}")
    hypos = decode(args, hypos[0], tgt_dict)
    logger.info(f"| | hypos _ decoding = {hypos}")
    # beam_results, beam_scores, timesteps, out_lens = decoder.decode(torch.tensor(hypos))
    ################ test code ################

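    # Standard evaluation loop: decode every batch in the subset and accumulate word errors.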
    with progress_bar.build_progress_bar(args, itr) as t:
        wps_meter = TimeMeter()
        # logger.info(f"| | in progress_bar = {t}")
        for sample in t:
            logger.info(f"| | in progress_bar | sample = {sample}")
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            if use_fp16:
                sample = utils.apply_to_sample(apply_half, sample)
            if "net_input" not in sample:
                continue

            prefix_tokens = None
            if args.prefix_size > 0:
                prefix_tokens = sample["target"][:, : args.prefix_size]

            # gen_timer.start()
            # if args.dump_emissions:
            #     with torch.no_grad():
            #         encoder_out = models[0](**sample["net_input"])
            #         emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
            #         emm = emm.transpose(0, 1).cpu().numpy()
            #         for i, id in enumerate(sample["id"]):
            #             emissions[id.item()] = emm[i]
            #         continue
            # elif args.dump_features:
            #     with torch.no_grad():
            #         encoder_out = models[0](**sample["net_input"])
            #         feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
            #         for i, id in enumerate(sample["id"]):
            #             padding = (
            #                 encoder_out["encoder_padding_mask"][i].cpu().numpy()
            #                 if encoder_out["encoder_padding_mask"] is not None
            #                 else None
            #             )
            #             features[id.item()] = (feat[i], padding)
            #         continue
            hypos = task.inference_step(generator, models, sample, prefix_tokens)
            print("hypos = ", hypos)
            print(f"tgt_dict = {tgt_dict}")
            exit()  # NOTE: debug early-exit; remove it to evaluate the full subset and compute WER
            num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)

            for i, sample_id in enumerate(sample["id"].tolist()):
                speaker = None
                # id = task.dataset(args.gen_subset).ids[int(sample_id)]
                id = sample_id
                toks = (
                    sample["target"][i, :]
                    if "target_label" not in sample
                    else sample["target_label"][i, :]
                )
                target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
                # Process top predictions
                errs, length = process_predictions(
                    args,
                    hypos[i],
                    None,
                    tgt_dict,
                    target_tokens,
                    res_files,
                    speaker,
                    id,
                )
                errs_t += errs
                lengths_t += length

            wps_meter.update(num_generated_tokens)
            t.log({"wps": round(wps_meter.avg)})
            num_sentences += (
                sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
            )

    wer = None
    if args.dump_emissions:
        emm_arr = []
        for i in range(len(emissions)):
            emm_arr.append(emissions[i])
        np.save(args.dump_emissions, emm_arr)
        logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
    elif args.dump_features:
        feat_arr = []
        for i in range(len(features)):
            feat_arr.append(features[i])
        np.save(args.dump_features, feat_arr)
        logger.info(f"saved {len(features)} features to {args.dump_features}")
    else:
        if lengths_t > 0:
            wer = errs_t * 100.0 / lengths_t
            logger.info(f"WER: {wer}")
    # logger.info(
    #     "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
    #     "sentences/s, {:.2f} tokens/s)".format(
    #         num_sentences,
    #         gen_timer.n,
    #         gen_timer.sum,
    #         num_sentences / gen_timer.sum,
    #         1.0 / gen_timer.avg,
    #     )
    # )
    # logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
    return task, wer

def make_parser():
    parser = options.get_generation_parser()
    parser = add_asr_eval_argument(parser)
    return parser


def cli_main():
    parser = make_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
    exit()

def build_generator(args):
    w2l_decoder = getattr(args, "w2l_decoder", None)
    if w2l_decoder == "viterbi":
        from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder

        return W2lViterbiDecoder(args, task.target_dictionary)
    elif w2l_decoder == "kenlm":
        from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder

        return W2lKenLMDecoder(args, task.target_dictionary)
    elif w2l_decoder == "fairseqlm":
        from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder

        return W2lFairseqLMDecoder(args, task.target_dictionary)
    elif w2l_decoder == "parlance":
        from decoder.w2l_parlance import W2lParlance

        return W2lParlance(args, task.target_dictionary)
    elif w2l_decoder == "online":
        from decoder.w2l_parlance import W2lParlanceOnlineDecoder

        return W2lParlanceOnlineDecoder(args, task.target_dictionary)
    else:
        print(
            "only viterbi, kenlm, fairseqlm, parlance, and online decoders are supported at the moment"
        )


if __name__ == "__main__":
    cli_main()
    exit()

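# Everything below runs at import time (e.g. when this module is imported by the speech
# server): it patches sys.argv with a fixed test configuration, loads the model once, and
# exposes inference(), inference_online(), and inference_file() as module-level helpers.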
## for parlance online decoder test
sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
            '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/kaist_best.pt',
            '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
            '--lm-model', '/root/project/speech_server/decoder/kakao3.bin', '--lm-weight', '2', '--word-score', '-1',
            '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
            '--max-tokens', '4000000', '--post-process', 'letter', '--cpu']

parser = make_parser()
args = options.parse_args_and_arch(parser)

use_fp16 = args.fp16
if args.max_tokens is None and args.batch_size is None:
    args.max_tokens = 4000000
logger.info(args)

use_cuda = torch.cuda.is_available() and not args.cpu
use_cuda_str = 'cuda' if use_cuda else 'cpu'
logger.info("| decoding with criterion {}".format(args.criterion))

task = tasks.setup_task(args)

logger.info("| loading model(s) from {}".format(args.path))
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    utils.split_paths(args.path, separator="\\"),
    arg_overrides=ast.literal_eval(args.model_overrides),
    task=task,
    suffix=args.checkpoint_suffix,
    strict=(args.checkpoint_shard_count == 1),
    num_shards=args.checkpoint_shard_count,
    state=None,
)

## optimize
optimize_models(args, use_cuda, models)
task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)

# Set dictionary
tgt_dict = task.target_dictionary
# labels = [k for k in tgt_dict.indices.keys()]

logger.info(
    "| {} {} {} examples".format(
        args.data, args.gen_subset, len(task.dataset(args.gen_subset))
    )
)

generator = build_generator(args)

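# Streaming-style inference: split the waveform into fixed 3-second chunks (16 kHz audio)
# and feed them to the online decoder, flagging eos on the final chunk.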
def list_chunk(lst, n):
    return [lst[i:i + n] for i in range(0, len(lst), n)]


def inference_online(audio):
    sample = dict()
    net_input = dict()
    feature = get_feature(audio)
    hypos_list = []
    sec = 3
    feature_list = list_chunk(feature, 16000 * sec)
    for idx, _feat in enumerate(feature_list):
        net_input["source"] = _feat.unsqueeze(0).to(use_cuda_str)
        padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
        net_input["padding_mask"] = padding_mask
        sample["net_input"] = net_input
        models[0].to(use_cuda_str)
        # hypos = task.inference_step(generator, models, sample, None)
        eos = (idx == len(feature_list) - 1)
        with torch.no_grad():
            hypos = generator.generate(models, sample, eos=eos, prefix_tokens=None, constraints=None)
        hypos_list.append(hypos)
        logger.info(f"| | hypos_list => {hypos_list}")
    return hypos_list[-1]

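# Single-pass inference over a full utterance (audio array in, fairseq hypotheses out).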
def inference(audio):
    logger.info("| in inference func !!")
    sample = dict()
    net_input = dict()
    feature = get_feature(audio)
    net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
    logger.info(f'feature shape = {net_input["source"].shape}')
    padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
    net_input["padding_mask"] = padding_mask
    sample["net_input"] = net_input
    models[0].to(use_cuda_str)
    hypos = task.inference_step(generator, models, sample, None)
    logger.info(f"| | hypos => {hypos}")
    return hypos

def inference_file():
    sample = dict()
    net_input = dict()
    feature = get_feature_to_path('/root/nas/data/kspon_with_aug/ogg/pcm_wav/test/eval_clean/KsponSpeech_E00001.ogg')
    net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
    padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
    net_input["padding_mask"] = padding_mask
    sample["net_input"] = net_input
    models[0].to(use_cuda_str)
    hypos = task.inference_step(generator, models, sample, None)
    logger.info(f"| | hypos => {hypos}")
    return hypos