import argparse

import torch
from ctcdecode import CTCBeamDecoder, OnlineCTCBeamDecoder, DecoderState
from examples.speech_recognition.w2l_decoder import W2lDecoder


class W2lParlance(object):
    """Batch CTC beam-search decoder built on the parlance ctcdecode package."""

    def __init__(self, args, tgt_dict):
        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
        self.decoder = CTCBeamDecoder(
            self.tgt_dict,
            model_path=getattr(args, "kenlm_model", None),
            alpha=float(getattr(args, "lm_weight", 0.5)),
            beta=float(getattr(args, "beta", 10.0)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=False,
        )
        # When True, decode() only looks at a fixed window of recent frames,
        # which suits a continuously growing emission buffer.
        self.online = True

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        # The decoder was built with log_probs_input=False, so it expects
        # softmax probabilities rather than log-probabilities.
        emissions_softmax = torch.nn.functional.softmax(emissions, dim=2)
        return self.decode(emissions_softmax)

    def get_emissions(self, models, encoder_input):
        """Run the encoder and return (B, T, N) emission logits on the CPU."""
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = (
            model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        )
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        """Map the first seq_len token ids back to their vocabulary symbols."""
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions_softmax):
        if self.online:
            # Decode a fixed 50-frame window that lags 25 frames behind the
            # end of the emission buffer.
            emissions_softmax = emissions_softmax[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions_softmax
        )
        if len(beam_results[0]) == 0:
            return ""
        # Best beam of the first utterance; "|" is the word separator in the
        # wav2vec letter vocabulary.
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
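

# --- Usage sketch (illustrative, not part of the decoder) -------------------
# A minimal, hedged example of driving W2lParlance directly with random
# emissions instead of a fairseq model. The tiny _DummyDict vocabulary and
# every value in `args` below are assumptions for illustration only; a real
# tgt_dict comes from fairseq and exposes .indices the same way.
def _example_offline_decode():
    class _DummyDict:
        # mimics the only attribute W2lParlance reads from tgt_dict
        indices = {"_": 0, "|": 1, "a": 2, "b": 3}

    args = argparse.Namespace(beam_width=10, blank_id=0, lm_weight=0.0)
    decoder = W2lParlance(args, _DummyDict())
    decoder.online = False  # decode the whole (tiny) emission buffer

    # Fake (B=1, T=20, N=4) emission probabilities.
    probs = torch.nn.functional.softmax(torch.randn(1, 20, 4), dim=2)
    print(decoder.decode(probs))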


def str2bool(v):
    """Parse a human-friendly boolean flag value for argparse."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
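

# A small sketch of wiring str2bool into an argument parser so that flags
# like `--online yes` or `--online false` parse to real booleans; the
# --online flag name here is illustrative, not an existing option.
def _example_str2bool_flag():
    parser = argparse.ArgumentParser()
    parser.add_argument("--online", type=str2bool, default=True)
    args = parser.parse_args(["--online", "no"])
    assert args.online is False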


class W2lParlanceOnlineDecoder(W2lDecoder):
    """Streaming CTC beam-search decoder that keeps beam state across calls."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
        self.decoder = OnlineCTCBeamDecoder(
            self.tgt_dict,
            model_path=getattr(args, "kenlm_model", None),
            alpha=float(getattr(args, "lm_weight", 0.5)),
            beta=float(getattr(args, "beta", 10.0)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=True,
        )
        # Persistent beam state: carries hypotheses across successive
        # decode() calls on consecutive chunks of the same utterance.
        self.state = DecoderState(self.decoder)

    def generate(self, models, sample, eos=False, **kwargs):
        """Decode one chunk; pass eos=True on the final chunk of an utterance."""
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        # The decoder was built with log_probs_input=True, so feed it
        # log-probabilities.
        emissions = torch.nn.functional.log_softmax(emissions, dim=2)
        return self.decode(emissions, eos)

    def get_emissions(self, models, encoder_input):
        """Run the encoder and return (B, T, N) emission logits on the CPU."""
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = (
            model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        )
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        """Map the first seq_len token ids back to their vocabulary symbols."""
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions, eos=False):
        # One eos flag per batch element; the single shared DecoderState
        # assumes batch size 1.
        is_eos_s = [eos for _ in range(len(emissions))]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions, [self.state], is_eos_s
        )
        if len(beam_results[0]) == 0:
            return ""
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")

    def inference(self, emissions, end):
        # Decode a fixed 50-frame window that lags 25 frames behind the end
        # of the emission buffer.
        emissions = emissions[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions, [self.state], [end]
        )
        if len(beam_results[0]) == 0:
            return ""
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
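

# --- Streaming usage sketch (illustrative) ----------------------------------
# A hedged sketch of chunk-by-chunk decoding with W2lParlanceOnlineDecoder.
# `args` and `tgt_dict` are assumed to be the usual fairseq objects the
# W2lDecoder base class expects (CTC criterion, nbest, etc.); the random
# log-probabilities and the 20-frame chunking are assumptions for
# illustration only.
def _example_streaming_decode(args, tgt_dict):
    decoder = W2lParlanceOnlineDecoder(args, tgt_dict)

    # Stand-in for encoder output: (B=1, T=60, N=vocab) log-probabilities.
    log_probs = torch.nn.functional.log_softmax(
        torch.randn(1, 60, len(decoder.tgt_dict)), dim=2
    )
    chunks = list(log_probs.split(20, dim=1))
    for i, chunk in enumerate(chunks):
        # eos=True on the final chunk lets the decoder finalize hypotheses.
        print(decoder.decode(chunk, eos=(i == len(chunks) - 1)))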