- import argparse
- import ast
- import logging
- import math
- import os
- import sys
- import editdistance
- import numpy as np
- import torch
- from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
- from fairseq.data.data_utils import post_process
- from fairseq.logging.meters import StopwatchMeter, TimeMeter
- import soundfile as sf
- import torch.nn.functional as F
- import glob
- from ctcdecode import CTCBeamDecoder
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
- def add_asr_eval_argument(parser):
- parser.add_argument("--kspmodel", default=None, help="sentence piece model")
- parser.add_argument(
- "--wfstlm", default=None, help="wfstlm on dictonary output units"
- )
- parser.add_argument(
- "--rnnt_decoding_type",
- default="greedy",
- help="RNN-T decoding type (e.g. greedy)",
- )
- try:
- parser.add_argument(
- "--lm-weight",
- "--lm_weight",
- type=float,
- default=0.2,
- help="weight for lm while interpolating with neural score",
- )
- except argparse.ArgumentError:
- # --lm-weight may already be registered by the underlying generation parser
- pass
- parser.add_argument(
- "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
- )
- parser.add_argument(
- "--w2l-decoder",
- choices=["viterbi", "kenlm", "fairseqlm", "parlance", "online"],
- help="use a w2l decoder",
- )
- parser.add_argument("--lexicon", help="lexicon for w2l decoder")
- parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
- parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
- parser.add_argument("--beam-threshold", type=float, default=25.0)
- parser.add_argument("--beam-size-token", type=float, default=100)
- parser.add_argument("--word-score", type=float, default=1.0)
- parser.add_argument("--unk-weight", type=float, default=-math.inf)
- parser.add_argument("--sil-weight", type=float, default=0.0)
- parser.add_argument(
- "--dump-emissions",
- type=str,
- default=None,
- help="if present, dumps emissions into this file and exits",
- )
- parser.add_argument(
- "--dump-features",
- type=str,
- default=None,
- help="if present, dumps features into this file and exits",
- )
- parser.add_argument(
- "--load-emissions",
- type=str,
- default=None,
- help="if present, loads emissions from this file",
- )
- return parser
- def check_args(args):
- # assert args.path is not None, "--path required for generation!"
- # assert args.results_path is not None, "--results_path required for generation!"
- assert (
- not args.sampling or args.nbest == args.beam
- ), "--sampling requires --nbest to be equal to --beam"
- assert (
- args.replace_unk is None or args.raw_text
- ), "--replace-unk requires a raw text dataset (--raw-text)"
- def get_dataset_itr(args, task, models):
- return task.get_batch_iterator(
- dataset=task.dataset(args.gen_subset),
- max_tokens=args.max_tokens,
- max_sentences=args.batch_size,
- max_positions=(sys.maxsize, sys.maxsize),
- ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
- required_batch_size_multiple=args.required_batch_size_multiple,
- num_shards=args.num_shards,
- shard_id=args.shard_id,
- num_workers=args.num_workers,
- data_buffer_size=args.data_buffer_size,
- ).next_epoch_itr(shuffle=False)
- def decode(args, hypos, tgt_dict):
- for hypo in hypos[: min(len(hypos), args.nbest)]:
- hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
- if "words" in hypo:
- hyp_words = " ".join(hypo["words"])
- else:
- hyp_words = post_process(hyp_pieces, args.post_process)
- return hyp_words
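- # process_predictions: write each n-best hypothesis and its reference to the
- # result files, then return (word-level edit distance, reference length) so the
- # caller can accumulate a corpus WER.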
- def process_predictions(
- args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
- ):
- for hypo in hypos[: min(len(hypos), args.nbest)]:
- hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
- if "words" in hypo:
- hyp_words = " ".join(hypo["words"])
- else:
- hyp_words = post_process(hyp_pieces, args.post_process)
- if res_files is not None:
- print(
- "{} ({}-{})".format(hyp_pieces, speaker, id),
- file=res_files["hypo.units"],
- )
- print(
- "{} ({}-{})".format(hyp_words, speaker, id),
- file=res_files["hypo.words"],
- )
- tgt_pieces = tgt_dict.string(target_tokens)
- tgt_words = post_process(tgt_pieces, args.post_process)
- if res_files is not None:
- print(
- "{} ({}-{})".format(tgt_pieces, speaker, id),
- file=res_files["ref.units"],
- )
- print(
- "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
- )
- if not args.quiet:
- logger.info("HYPO:" + hyp_words)
- logger.info("TARGET:" + tgt_words)
- logger.info("___________________")
- hyp_words = hyp_words.split()
- tgt_words = tgt_words.split()
- return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
- def prepare_result_files(args):
- def get_res_file(file_prefix):
- if args.num_shards > 1:
- file_prefix = f"{args.shard_id}_{file_prefix}"
- path = os.path.join(
- args.results_path,
- "{}-{}-{}.txt".format(
- file_prefix, os.path.basename(args.path), args.gen_subset
- ),
- )
- return open(path, "w", buffering=1)
- if not args.results_path:
- return None
- return {
- "hypo.words": get_res_file("hypo.word"),
- "hypo.units": get_res_file("hypo.units"),
- "ref.words": get_res_file("ref.word"),
- "ref.units": get_res_file("ref.units"),
- }
- def optimize_models(args, use_cuda, models):
- """Optimize ensemble for generation"""
- for model in models:
- model.make_generation_fast_(
- beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
- need_attn=args.print_alignment,
- )
- if args.fp16:
- model.half()
- if use_cuda:
- model.cuda()
- def apply_half(t):
- if t.dtype is torch.float32:
- return t.to(dtype=torch.half)
- return t
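- # Feature helpers: load a waveform (from a file path or an in-memory array) and
- # apply the per-utterance layer norm expected by the (presumably wav2vec 2.0
- # style) acoustic model.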
- def get_feature_to_path(filepath):
- wav, sample_rate = sf.read(filepath)
- feats = torch.from_numpy(wav).float()
- feats = feature_postprocess(feats)
- return feats
- def get_feature(wav):
- audio = np.array(wav).squeeze()
- feats = torch.from_numpy(audio).float()
- feats = feature_postprocess(feats)
- return feats
- def feature_postprocess(feats):
- if feats.dim() == 2:
- feats = feats.mean(-1)
-
- assert feats.dim() == 1, feats.dim()
- with torch.no_grad():
- feats = F.layer_norm(feats, feats.shape)
- return feats
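- # convert_to_string: map CTCBeamDecoder output ids back to characters via the
- # label list; seq_len is the valid length reported in out_lens.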
- def convert_to_string(tokens, vocab, seq_len):
- return "".join([vocab[x] for x in tokens[0:seq_len]])
- def main(args, task=None, model_state=None):
- check_args(args)
- use_fp16 = args.fp16
- if args.max_tokens is None and args.batch_size is None:
- args.max_tokens = 4000000
- logger.info(args)
- use_cuda = torch.cuda.is_available() and not args.cpu
- logger.info("| decoding with criterion {}".format(args.criterion))
- task = tasks.setup_task(args)
- # Load ensemble
- if args.load_emissions:
- models, criterions = [], []
- task.load_dataset(args.gen_subset)
- else:
- logger.info("| loading model(s) from {}".format(args.path))
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
- utils.split_paths(args.path, separator="\\"),
- arg_overrides=ast.literal_eval(args.model_overrides),
- task=task,
- suffix=args.checkpoint_suffix,
- strict=(args.checkpoint_shard_count == 1),
- num_shards=args.checkpoint_shard_count,
- state=model_state,
- )
- optimize_models(args, use_cuda, models)
- task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
- # Set dictionary
- tgt_dict = task.target_dictionary
- logger.info(f"| | tgt_dict = {tgt_dict.indices}")
- labels = list(tgt_dict.indices.keys())
- logger.info(
- "| {} {} {} examples".format(
- args.data, args.gen_subset, len(task.dataset(args.gen_subset))
- )
- )
- # hack to pass transitions to W2lDecoder
- if args.criterion == "asg_loss":
- raise NotImplementedError("asg_loss is currently not supported")
- # trans = criterions[0].asg.trans.data
- # args.asg_transitions = torch.flatten(trans).tolist()
- # Load dataset (possibly sharded)
- itr = get_dataset_itr(args, task, models)
- # Initialize generator
- # gen_timer = StopwatchMeter()
- def build_generator(args):
- w2l_decoder = getattr(args, "w2l_decoder", None)
- if w2l_decoder == "viterbi":
- from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
- return W2lViterbiDecoder(args, task.target_dictionary)
- elif w2l_decoder == "kenlm":
- from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
- return W2lKenLMDecoder(args, task.target_dictionary)
- elif w2l_decoder == "fairseqlm":
- from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
- return W2lFairseqLMDecoder(args, task.target_dictionary)
- else:
- print(
- "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
- )
- # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
- generator = build_generator(args)
- if args.load_emissions:
- generator = ExistingEmissionsDecoder(
- generator, np.load(args.load_emissions, allow_pickle=True)
- )
- logger.info("loaded emissions from " + args.load_emissions)
- num_sentences = 0
- if args.results_path is not None and not os.path.exists(args.results_path):
- os.makedirs(args.results_path)
- max_source_pos = (
- utils.resolve_max_positions(
- task.max_positions(), *[model.max_positions() for model in models]
- ),
- )
- if max_source_pos is not None:
- max_source_pos = max_source_pos[0]
- if max_source_pos is not None:
- max_source_pos = max_source_pos[0] - 1
- if args.dump_emissions:
- emissions = {}
- if args.dump_features:
- features = {}
- models[0].bert.proj = None
- else:
- res_files = prepare_result_files(args)
- errs_t = 0
- lengths_t = 0
- ################ test code ################
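- # One-off sanity check on a single hard-coded audio file: run the model forward,
- # softmax the CTC logits, and compare CTCBeamDecoder output with the output of
- # task.inference_step on the same sample.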
- _sample = dict()
- _net_input = dict()
- _cuda = 'cpu'
- feature = get_feature_to_path("/root/mnt/data/kspon_ori/ogg/fork/KsponSpeech_01/KsponSpeech_0001/KsponSpeech_000002.ogg")
- _net_input["source"] = feature.unsqueeze(0).to(_cuda)
- padding_mask = torch.BoolTensor(_net_input["source"].size(1)).fill_(False).unsqueeze(0).to(_cuda)
- _net_input["padding_mask"] = padding_mask
- _sample["net_input"] = _net_input
- ## model cuda change
- models[0].to(_cuda)
- decoder = CTCBeamDecoder(
- labels,
- model_path=None,
- alpha=0,
- beta=0,
- cutoff_top_n=40,
- cutoff_prob=1.0,
- beam_width=100,
- num_processes=4,
- blank_id=0,
- log_probs_input=False
- )
-
- with torch.no_grad():
- encoder_input = {
- k: v for k, v in _sample["net_input"].items() if k != "prev_output_tokens"
- }
- model = models[0]
- encoder_out = model(**encoder_input)
- emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
- logger.info(f"| | emissions = {emissions}, {emissions.shape}")
- sft = torch.nn.functional.softmax(emissions, dim=2)
- logger.info(f"| | sft = {sft.shape}, {sft[0][0].sum()}")
- beam_results, beam_scores, timesteps, out_lens = decoder.decode(sft)
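- # beam_results has shape (batch, beam_width, max_len); out_lens[b][k] gives the
- # valid length of beam k, so beam_results[0][0][:out_lens[0][0]] is the best hypothesis.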
- output_str = convert_to_string(beam_results[0][0], labels, out_lens[0][0]).replace("|", " ")
- tmp = [{'tokens': beam_results[0][0][:out_lens[0][0]]}]
- output_str2 = decode( args, tmp, tgt_dict )
- prefix_tokens = None
- hypos = task.inference_step(generator, models, _sample, prefix_tokens)
- logger.info(f"| | hypos = {hypos}")
- hypos = decode( args, hypos[0], tgt_dict )
- logger.info(f"| | hypos _ decoding = {hypos}")
- # beam_results, beam_scores, timesteps, out_lens = decoder.decode(torch.tensor(hypos))
- ################ test code ################
-
- with progress_bar.build_progress_bar(args, itr) as t:
- wps_meter = TimeMeter()
- # logger.info(f"| | in progress_bar = {t}")
- for sample in t:
- logger.info(f"| | in progress_bar | sample = {sample}")
- sample = utils.move_to_cuda(sample) if use_cuda else sample
- if use_fp16:
- sample = utils.apply_to_sample(apply_half, sample)
- if "net_input" not in sample:
- continue
- prefix_tokens = None
- if args.prefix_size > 0:
- prefix_tokens = sample["target"][:, : args.prefix_size]
- # gen_timer.start()
- # if args.dump_emissions:
- # with torch.no_grad():
- # encoder_out = models[0](**sample["net_input"])
- # emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
- # emm = emm.transpose(0, 1).cpu().numpy()
- # for i, id in enumerate(sample["id"]):
- # emissions[id.item()] = emm[i]
- # continue
- # elif args.dump_features:
- # with torch.no_grad():
- # encoder_out = models[0](**sample["net_input"])
- # feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
- # for i, id in enumerate(sample["id"]):
- # padding = (
- # encoder_out["encoder_padding_mask"][i].cpu().numpy()
- # if encoder_out["encoder_padding_mask"] is not None
- # else None
- # )
- # features[id.item()] = (feat[i], padding)
- # continue
- hypos = task.inference_step(generator, models, sample, prefix_tokens)
- print("hypos = ", hypos)
- print(f"tgt_dict = {tgt_dict}")
- exit()  # debug: stop after decoding the first batch
- num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
- for i, sample_id in enumerate(sample["id"].tolist()):
- speaker = None
- # id = task.dataset(args.gen_subset).ids[int(sample_id)]
- id = sample_id
- toks = (
- sample["target"][i, :]
- if "target_label" not in sample
- else sample["target_label"][i, :]
- )
- target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
- # Process top predictions
- errs, length = process_predictions(
- args,
- hypos[i],
- None,
- tgt_dict,
- target_tokens,
- res_files,
- speaker,
- id,
- )
- errs_t += errs
- lengths_t += length
- wps_meter.update(num_generated_tokens)
- t.log({"wps": round(wps_meter.avg)})
- num_sentences += (
- sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
- )
- wer = None
- if args.dump_emissions:
- emm_arr = []
- for i in range(len(emissions)):
- emm_arr.append(emissions[i])
- np.save(args.dump_emissions, emm_arr)
- logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
- elif args.dump_features:
- feat_arr = []
- for i in range(len(features)):
- feat_arr.append(features[i])
- np.save(args.dump_features, feat_arr)
- logger.info(f"saved {len(features)} emissions to {args.dump_features}")
- else:
- if lengths_t > 0:
- wer = errs_t * 100.0 / lengths_t
- logger.info(f"WER: {wer}")
- # logger.info(
- # "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
- # "sentences/s, {:.2f} tokens/s)".format(
- # num_sentences,
- # gen_timer.n,
- # gen_timer.sum,
- # num_sentences / gen_timer.sum,
- # 1.0 / gen_timer.avg,
- # )
- # )
- # logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
-
- return task, wer
- def make_parser():
- parser = options.get_generation_parser()
- parser = add_asr_eval_argument(parser)
- return parser
- def cli_main():
- parser = make_parser()
- args = options.parse_args_and_arch(parser)
- main(args)
- exit()
- def build_generator(args):
- w2l_decoder = getattr(args, "w2l_decoder", None)
- if w2l_decoder == "viterbi":
- from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
- return W2lViterbiDecoder(args, task.target_dictionary)
- elif w2l_decoder == "kenlm":
- from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
- return W2lKenLMDecoder(args, task.target_dictionary)
- elif w2l_decoder == "fairseqlm":
- from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
- return W2lFairseqLMDecoder(args, task.target_dictionary)
- elif w2l_decoder == "parlance":
- from decoder.w2l_parlance import W2lParlance
- return W2lParlance(args, task.target_dictionary)
- elif w2l_decoder == "online":
- from decoder.w2l_parlance import W2lParlanceOnlineDecoder
- return W2lParlanceOnlineDecoder(args, task.target_dictionary)
- else:
- print(
- "only viterbi, kenlm, fairseqlm, parlance and online decoders are supported at the moment"
- )
- # if __name__ == "__main__":
- # cli_main()
- # exit()
- ## for parlance online decoder test
- # sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
- # '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt',
- # '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
- # '--lm-model', '/root/project/speech_server/decoder/kakao3.bin', '--lm-weight', '2', '--word-score', '-1',
- # '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
- # '--max-tokens', '4000000', '--post-process', 'letter', '--cpu']
- # sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
- # '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt',
- # '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
- # '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '5', '--word-score', '-1',
- # '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
- # '--max-tokens', '4000000', '--post-process', 'letter']
- # sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
- # '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/estate_cer20.pt',
- # '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'viterbi',
- # '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '5', '--word-score', '-1',
- # '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
- # '--max-tokens', '4000000', '--post-process', 'letter']
- # sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
- # '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt',
- # '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
- # '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '0', '--word-score', '-1',
- # '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
- # '--max-tokens', '4000000', '--post-process', 'letter']
- ## no lm
- sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest',
- '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/estate_cer20.pt',
- '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance',
- '--word-score', '-1',
- '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr',
- '--max-tokens', '4000000', '--post-process', 'letter']
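- # The hard-coded sys.argv above lets this module be imported by the speech server:
- # importing the file parses these fixed flags, loads the checkpoint and builds the
- # generator once, so inference()/inference_online() can be called directly.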
- parser = make_parser()
- args = options.parse_args_and_arch(parser)
- use_fp16 = args.fp16
- if args.max_tokens is None and args.batch_size is None:
- args.max_tokens = 4000000
- logger.info(args)
- use_cuda = torch.cuda.is_available() and not args.cpu
- use_cuda_str = 'cuda' if use_cuda else 'cpu'
- logger.info("| decoding with criterion {}".format(args.criterion))
- task = tasks.setup_task(args)
- logger.info("| loading model(s) from {}".format(args.path))
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
- utils.split_paths(args.path, separator="\\"),
- arg_overrides=ast.literal_eval(args.model_overrides),
- task=task,
- suffix=args.checkpoint_suffix,
- strict=(args.checkpoint_shard_count == 1),
- num_shards=args.checkpoint_shard_count,
- state=None,
- )
- ## optimize
- optimize_models(args, use_cuda, models)
- task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
- # Set dictionary
- tgt_dict = task.target_dictionary
- # labels = [k for k in tgt_dict.indices.keys()]
- logger.info(
- "| {} {} {} examples".format(
- args.data, args.gen_subset, len(task.dataset(args.gen_subset))
- )
- )
- generator = build_generator(args)
- def list_chunk(lst, n):
- return [lst[i:i+n] for i in range(0, len(lst), n)]
- def inference_online(audio):
- sample = dict()
- net_input = dict()
- feature = get_feature(audio)
- hypos_list = []
- sec = 3
- feature_list = list_chunk(feature, 16000 * sec)
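- # 16000 * sec assumes 16 kHz audio, so each chunk fed to the online decoder is
- # roughly `sec` seconds of speech; eos is set on the last chunk to flush the decoder.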
- for idx, _feat in enumerate(feature_list):
- net_input["source"] = _feat.unsqueeze(0).to(use_cuda_str)
- padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
- net_input["padding_mask"] = padding_mask
- sample["net_input"] = net_input
- models[0].to(use_cuda_str)
- # hypos = task.inference_step(generator, models, sample, None)
- eos = False
- if idx == len(feature_list) - 1:
- eos = True
-
- with torch.no_grad():
- hypos = generator.generate(models, sample, eos=eos, prefix_tokens=None, constraints=None)
-
- hypos_list.append(hypos)
-
- logger.info(f"| | hypos_list => {hypos_list}")
- return hypos_list[-1]
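- # inference: decode a single utterance in one pass; for the viterbi decoder the
- # n-best output is post-processed into words, otherwise the raw decoder output
- # (e.g. from the parlance decoder) is returned as-is.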
- def inference(audio):
- sample = dict()
- net_input = dict()
- feature = get_feature(audio)
- net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
- logger.info(f'feature shape = {net_input["source"].shape}')
- padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
- net_input["padding_mask"] = padding_mask
- sample["net_input"] = net_input
- models[0].to(use_cuda_str)
- hypos = task.inference_step(generator, models, sample, None)
- if args.w2l_decoder == 'viterbi':
- for hypo in hypos[: min(len(hypos), args.nbest)]:
- best = hypo[0]
- hyp_pieces = tgt_dict.string(best["tokens"].int().cpu())
- if "words" in best:
- hyp_words = " ".join(best["words"])
- else:
- hyp_words = post_process(hyp_pieces, args.post_process)
- hypos = hyp_words
- logger.info(f"| | hypos => {hypos}")
-
- return hypos
- def inference_file(file_list):
- sample = dict()
- net_input = dict()
- for _file in file_list:
- feature = get_feature_to_path(_file)
- net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
- padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
- net_input["padding_mask"] = padding_mask
- sample["net_input"] = net_input
- models[0].to(use_cuda_str)
- logger.info(f'feature = {sample["net_input"]["source"].shape}')
- hypos = task.inference_step(generator, models, sample, None)
- if args.w2l_decoder == 'viterbi':
- for hypo in hypos[: min(len(hypos), args.nbest)]:
- best = hypo[0]
- hyp_pieces = tgt_dict.string(best["tokens"].int().cpu())
- if "words" in best:
- hyp_words = " ".join(best["words"])
- else:
- hyp_words = post_process(hyp_pieces, args.post_process)
- hypos = hyp_words
-
- logger.info(f"| | file => {_file}")
- logger.info(f"| | hypos => {hypos}")
- return ''
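- ## NOTE: the lines below appear to be a leftover single-file test kept for
- ## reference; they reuse net_input/sample and end with a return, so they
- ## presumably sat inside one of the inference helpers above.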
- feature = get_feature_to_path('/root/nas/data/estate_114/ogg/wav/202001/02/93687_5009_5009_99024777300_20200102103742.ogg')
-
- net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
- padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
- net_input["padding_mask"] = padding_mask
- sample["net_input"] = net_input
- models[0].to(use_cuda_str)
- hypos = task.inference_step(generator, models, sample, None)
- logger.info(f"| | hypos => {hypos}")
-
- return hypos