import argparse

import torch
from ctcdecode import CTCBeamDecoder, OnlineCTCBeamDecoder, DecoderState

from examples.speech_recognition.w2l_decoder import W2lDecoder


class W2lParlance(object):
    """Offline CTC beam-search decoder built on parlance/ctcdecode."""

    def __init__(self, args, tgt_dict):
        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
        # Language model path and weight are currently hard-coded.
        lm_path = "/root/kakao3.arpa"
        alpha = 5
        print('lm path, alpha = ', lm_path, alpha)
        self.decoder = CTCBeamDecoder(
            self.tgt_dict,
            model_path=lm_path,
            alpha=alpha,
            beta=10,  # beta = float(getattr(args, "beta", 0)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=False,
        )

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        emissions_softmax = torch.nn.functional.softmax(emissions, dim=2)
        return self.decode(emissions_softmax)

    def get_emissions(self, models, encoder_input):
        """Run encoder and normalize emissions."""
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions_softmax):
        # Only a fixed 50-frame window is decoded (kept from the original code).
        emissions_softmax = emissions_softmax[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions_softmax)

        ## decoding option 1: top beam of the first utterance
        if len(beam_results[0]) == 0:
            return ''
        output_str = self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
        # print('output str = ', output_str)
        # if isinstance(output_str, list):
        #     return output_str[0]

        ## decoding option 2
        # tmp = [{'tokens': beam_results[0][0][:out_lens[0][0]]}]
        # def decode_opt(args, hypos, tgt_dict):
        #     for hypo in hypos[: min(len(hypos), args.nbest)]:
        #         hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        #         if "words" in hypo:
        #             hyp_words = " ".join(hypo["words"])
        #         else:
        #             hyp_words = post_process(hyp_pieces, args.post_process)
        #         return hyp_words
        # output_str2 = decode_opt(args, tmp, tgt_dict)

        return output_str
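# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original pipeline): the raw
# CTCBeamDecoder call that W2lParlance wraps, driven with a dummy vocabulary
# and random emissions instead of fairseq encoder output. No language model
# is attached, so no .arpa file is required.
# ---------------------------------------------------------------------------
def _offline_decode_example():
    vocab = ["_", "|", "a", "b", "c"]  # dummy labels: blank, word separator, characters
    decoder = CTCBeamDecoder(vocab, beam_width=10, blank_id=0, log_probs_input=False)
    # Fake encoder output: batch of 1, 50 frames, |vocab| classes, softmax-normalized.
    probs = torch.nn.functional.softmax(torch.randn(1, 50, len(vocab)), dim=2)
    beam_results, beam_scores, timesteps, out_lens = decoder.decode(probs)
    # Top beam of the first utterance, with "|" mapped back to a space.
    return "".join(vocab[t] for t in beam_results[0][0][:out_lens[0][0]]).replace("|", " ")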
def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


class W2lParlanceOnlineDecoder(W2lDecoder):
    """Streaming CTC beam-search decoder that keeps a DecoderState across chunks."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        # Log the language model path and weight actually passed to the decoder.
        print(getattr(args, "kenlm_model", None), float(getattr(args, "lm_weight", 0.5)))
        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
        self.decoder = OnlineCTCBeamDecoder(
            self.tgt_dict,
            model_path=getattr(args, "kenlm_model", None),
            alpha=float(getattr(args, "lm_weight", 0.5)),
            beta=float(getattr(args, "beta", 10)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=True,
        )
        self.state = DecoderState(self.decoder)

    def generate(self, models, sample, eos=False, **kwargs):
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        emissions = torch.nn.functional.softmax(emissions, dim=2).log()
        return self.decode(emissions, eos)
        # if hasattr(kwargs, 'inference'):
        #     return self.inference(emissions_softmax, str2bool(getattr(kwargs, 'inference')))
        # else:
        #     return self.decode(emissions_softmax)

    def get_emissions(self, models, encoder_input):
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions, eos=False):
        B, T, N = emissions.size()
        is_eos_s = [eos for _ in range(len(emissions))]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions, [self.state], is_eos_s
        )
        # Chunked decoding variant, kept for reference:
        # n = 16000 * 2
        # emissions_list = [emissions[:, i:i+n] for i in range(0, len(emissions), n)]
        # result = ''
        # for idx, _em in enumerate(emissions_list):
        #     if idx == len(emissions_list) - 1:
        #         is_eos_s = [True for _ in range(len(emissions))]
        #     beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], is_eos_s)
        #     result += self.convert_to_string(beam_results[0][0], self.tgt_dict, out_lens[0][0]).replace("|", " ")
        # return result
        # beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], [False])

        if len(beam_results[0]) == 0:
            return ''
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
        # Alternative fairseq-style hypothesis output (unreachable in the original):
        # return [
        #     [{
        #         "tokens": self.get_tokens(beam_results[idx][0][:out_lens[idx][0]]),
        #         "score": beam_scores[idx],
        #     }]
        #     for idx in range(len(beam_results))
        # ]

    def inference(self, emissions, end):
        print(len(emissions))
        emissions = emissions[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions, [self.state], [end]
        )
        if len(beam_results[0]) == 0:
            print('check!!!')
            return ''
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
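# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original pipeline): drives the
# streaming OnlineCTCBeamDecoder the same way W2lParlanceOnlineDecoder does,
# but with a dummy vocabulary and random log-probabilities instead of a
# fairseq model. A single DecoderState is reused across chunks so partial
# hypotheses carry over; only the last chunk is flagged as end-of-stream.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    vocab = ["_", "|", "a", "b", "c"]  # dummy labels: blank, word separator, characters
    online_decoder = OnlineCTCBeamDecoder(
        vocab, beam_width=10, blank_id=0, log_probs_input=True
    )
    state = DecoderState(online_decoder)

    chunks = [torch.randn(1, 50, len(vocab)) for _ in range(3)]  # fake encoder chunks
    text = ''
    for i, chunk in enumerate(chunks):
        log_probs = torch.nn.functional.log_softmax(chunk, dim=2)
        is_last = i == len(chunks) - 1
        beam_results, beam_scores, timesteps, out_lens = online_decoder.decode(
            log_probs, [state], [is_last]
        )
        # Top hypothesis for the first (only) utterance decoded so far.
        text = "".join(
            vocab[t] for t in beam_results[0][0][:out_lens[0][0]]
        ).replace("|", " ")
    print('online decoded:', text)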