# w2l_parlance.py
"""CTC beam-search decoding for wav2vec2 emissions via parlance/ctcdecode."""

import argparse

import torch
from ctcdecode import CTCBeamDecoder, OnlineCTCBeamDecoder, DecoderState
from examples.speech_recognition.w2l_decoder import W2lDecoder


class W2lParlance(object):
    """Offline CTC beam-search decoder built on parlance/ctcdecode."""

    def __init__(self, args, tgt_dict):
        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
        print('lm path, alpha =', getattr(args, "kenlm_model", None),
              float(getattr(args, "lm_weight", 0.5)))
        self.decoder = CTCBeamDecoder(
            self.tgt_dict,
            model_path=getattr(args, "kenlm_model", None),
            alpha=float(getattr(args, "lm_weight", 0.5)),
            beta=10,
            # beta=float(getattr(args, "beta", 0)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=False,  # this decoder receives softmax probabilities
        )
        self.online = True
        # self.online = False

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        # log_probs_input=False above, so hand the decoder softmax probabilities
        emissions_softmax = torch.nn.functional.softmax(emissions, dim=2)
        return self.decode(emissions_softmax)

    def get_emissions(self, models, encoder_input):
        """Run encoder and normalize emissions."""
        model = models[0]
        encoder_out = model(**encoder_input)
        # (T, B, N) -> (B, T, N); CPU-contiguous for the C++ decoder
        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        """Map the first seq_len token ids of the best beam to characters."""
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions_softmax):
        if self.online:
            # keep a fixed 50-frame window near the end of the utterance
            emissions_softmax = emissions_softmax[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions_softmax)
        ## decoding option 1
        if len(beam_results[0]) == 0:
            return ''
        output_str = self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
        # print('output str =', output_str)
        # if isinstance(output_str, list):
        #     return output_str[0]
        ## decoding option 2
        # tmp = [{'tokens': beam_results[0][0][:out_lens[0][0]]}]
        # def decode_opt(args, hypos, tgt_dict):
        #     for hypo in hypos[: min(len(hypos), args.nbest)]:
        #         hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        #         if "words" in hypo:
        #             hyp_words = " ".join(hypo["words"])
        #         else:
        #             hyp_words = post_process(hyp_pieces, args.post_process)
        #         return hyp_words
        # output_str2 = decode_opt(args, tmp, tgt_dict)
        return output_str
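

# A minimal, hedged sketch of the decode path W2lParlance wraps: build a
# CTCBeamDecoder over a toy vocabulary, feed it random softmax "emissions",
# and collapse the best beam to text the same way convert_to_string does.
# The vocabulary and tensor shapes are illustrative assumptions; real inputs
# come from the fairseq target dictionary and the wav2vec2 encoder.
def _offline_decode_demo():
    vocab = ["_", "|", "a", "b", "c"]  # "_" is the CTC blank at index 0
    decoder = CTCBeamDecoder(vocab, beam_width=10, blank_id=0, log_probs_input=False)
    probs = torch.randn(1, 50, len(vocab)).softmax(dim=2)  # (B, T, N)
    beam_results, beam_scores, timesteps, out_lens = decoder.decode(probs)
    best = beam_results[0][0][: out_lens[0][0]]
    return "".join(vocab[t] for t in best).replace("|", " ")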


def str2bool(v):
    """Parse a boolean-ish string for argparse."""
    if isinstance(v, bool):
        return v
    # .lower() already folds case, so only lowercase spellings can match
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
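

# Illustrative use of str2bool as an argparse type converter; the flag name
# --inference is an assumption mirroring the kwargs key checked in
# W2lParlanceOnlineDecoder.generate below, not an option this module registers.
def _build_arg_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--inference", type=str2bool, nargs="?", const=True, default=False)
    return parser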


class W2lParlanceOnlineDecoder(W2lDecoder):
    """Streaming CTC beam-search decoder that carries DecoderState across chunks."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        print(getattr(args, "kenlm_model", None), float(getattr(args, "lm_weight", 0.5)))
        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
        self.decoder = OnlineCTCBeamDecoder(
            self.tgt_dict,
            model_path=getattr(args, "kenlm_model", None),
            alpha=float(getattr(args, "lm_weight", 0.5)),
            beta=float(getattr(args, "beta", 10)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=True,  # this decoder receives log-probabilities
        )
        # persistent state lets successive decode() calls continue one utterance
        self.state = DecoderState(self.decoder)

    def generate(self, models, sample, eos=False, **kwargs):
        """Generate a batch of inferences for one chunk of audio."""
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        # log_probs_input=True above, so hand the decoder log-softmax emissions
        emissions = torch.nn.functional.softmax(emissions, dim=2).log()
        return self.decode(emissions, eos)
        # if 'inference' in kwargs:
        #     return self.inference(emissions, str2bool(kwargs['inference']))
        # else:
        #     return self.decode(emissions)

    def get_emissions(self, models, encoder_input):
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions, eos=False):
        B, T, N = emissions.size()  # (batch, frames, vocab)
        # one is_eos flag per batch element; True flushes the decoder state
        is_eos_s = [eos for _ in range(len(emissions))]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions, [self.state], is_eos_s
        )
        # alternative: decode in fixed-size chunks, flushing on the last one
        # n = 16000 * 2
        # emissions_list = [emissions[:, i:i + n] for i in range(0, len(emissions), n)]
        # result = ''
        # for idx, _em in enumerate(emissions_list):
        #     if idx == len(emissions_list) - 1:
        #         is_eos_s = [True for _ in range(len(emissions))]
        #     beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(_em, [self.state], is_eos_s)
        #     result += self.convert_to_string(beam_results[0][0], self.tgt_dict, out_lens[0][0]).replace("|", " ")
        # return result
        # beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], [False])
        if len(beam_results[0]) == 0:
            return ''
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
        # alternative: return fairseq-style hypotheses instead of a plain string
        # return [
        #     [{
        #         "tokens": self.get_tokens(beam_results[idx][0][:out_lens[idx][0]]),
        #         "score": beam_scores[idx],
        #     }]
        #     for idx in range(len(beam_results))
        # ]

    def inference(self, emissions, end):
        print(len(emissions))
        # decode only a fixed 50-frame window near the end of the buffer
        emissions = emissions[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], [end])
        if len(beam_results[0]) == 0:
            print('empty beam results')
            return ''
        return self.convert_to_string(beam_results[0][0], self.tgt_dict, out_lens[0][0]).replace("|", " ")
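

# Hedged, self-contained demo of the streaming pattern used above: feed an
# OnlineCTCBeamDecoder successive chunks of (random) log-probabilities while
# carrying one DecoderState across calls, flushing on the final chunk. The
# toy vocabulary and chunk sizes are assumptions for illustration only.
if __name__ == "__main__":
    vocab = ["_", "|", "a", "b", "c"]  # "_" is the CTC blank at index 0
    online = OnlineCTCBeamDecoder(vocab, beam_width=10, blank_id=0, log_probs_input=True)
    state = DecoderState(online)
    torch.manual_seed(0)
    for i in range(3):
        chunk = torch.randn(1, 20, len(vocab)).log_softmax(dim=2)  # (B, T, N)
        is_last = i == 2  # True flushes the decoder state on the final chunk
        beam_results, beam_scores, timesteps, out_lens = online.decode(chunk, [state], [is_last])
        best = beam_results[0][0][: out_lens[0][0]]
        print("".join(vocab[t] for t in best).replace("|", " "))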