# w2l_parlance.py

import argparse

import torch
from ctcdecode import CTCBeamDecoder, OnlineCTCBeamDecoder, DecoderState
from examples.speech_recognition.w2l_decoder import W2lDecoder


class W2lParlance(object):
    """Offline CTC beam-search decoder built on the parlance ctcdecode package."""

    def __init__(self, args, tgt_dict):
        self.tgt_dict = list(tgt_dict.indices.keys())
        # KenLM language model path and weight are currently hard-coded.
        lm_path = "/root/kakao3.arpa"
        alpha = 5
        print("lm path, alpha = ", lm_path, alpha)
        self.decoder = CTCBeamDecoder(
            self.tgt_dict,
            model_path=lm_path,
            alpha=alpha,
            beta=10,
            # beta=float(getattr(args, "beta", 0)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=False,  # this decoder is fed softmax probabilities
        )

    def generate(self, models, sample, **unused):
        """Generate a batch of inferences."""
        # model.forward normally channels prev_output_tokens into the decoder
        # separately, but SequenceGenerator directly calls model.encoder
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        emissions_softmax = torch.nn.functional.softmax(emissions, dim=2)
        return self.decode(emissions_softmax)

    def get_emissions(self, models, encoder_input):
        """Run the encoder and return (B, T, N) emissions on the CPU."""
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = (
            model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        )
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions_softmax):
        # Decode only a fixed 50-frame window near the end of the utterance.
        emissions_softmax = emissions_softmax[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions_softmax
        )
        if len(beam_results[0]) == 0:
            return ""
        # Decoding option 1: take the top beam and map "|" back to spaces.
        output_str = self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
        # print('output str = ', output_str)
        # if isinstance(output_str, list):
        #     return output_str[0]
        # Decoding option 2, kept for reference: run the hypothesis through
        # tgt_dict.string() and post_process() the way fairseq's recognizer does.
        # tmp = [{"tokens": beam_results[0][0][:out_lens[0][0]]}]
        # def decode_opt(args, hypos, tgt_dict):
        #     for hypo in hypos[: min(len(hypos), args.nbest)]:
        #         hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
        #         if "words" in hypo:
        #             hyp_words = " ".join(hypo["words"])
        #         else:
        #             hyp_words = post_process(hyp_pieces, args.post_process)
        #         return hyp_words
        # output_str2 = decode_opt(args, tmp, tgt_dict)
        return output_str
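

# Illustrative usage sketch (not part of the original module): decode one
# batch with W2lParlance. `models`, `sample`, and `tgt_dict` are assumed to
# come from fairseq's usual checkpoint/task loading; an empty namespace is
# enough for `args` because every decoder option above has a getattr default.
def _parlance_offline_example(models, sample, tgt_dict):
    args = argparse.Namespace()  # defaults: beam_width=100, blank_id=0, ...
    decoder = W2lParlance(args, tgt_dict)
    return decoder.generate(models, sample)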


def str2bool(v):
    """Parse common textual booleans, e.g. for argparse flags."""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("Boolean value expected.")
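

# Illustrative sketch (not part of the original module): wiring str2bool into
# an argparse flag so values like "--inference yes" parse to a real bool.
def _str2bool_example():
    parser = argparse.ArgumentParser()
    parser.add_argument("--inference", type=str2bool, default=False)
    return parser.parse_args(["--inference", "yes"]).inference  # -> True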


class W2lParlanceOnlineDecoder(W2lDecoder):
    """Streaming CTC beam-search decoder; a DecoderState carries the beam
    across successive chunks of emissions."""

    def __init__(self, args, tgt_dict):
        super().__init__(args, tgt_dict)
        print(getattr(args, "kenlm_model", None), float(getattr(args, "lm_weight", 0.5)))
        self.tgt_dict = list(tgt_dict.indices.keys())
        self.decoder = OnlineCTCBeamDecoder(
            self.tgt_dict,
            model_path=getattr(args, "kenlm_model", None),
            alpha=float(getattr(args, "lm_weight", 0.5)),
            beta=float(getattr(args, "beta", 10)),
            cutoff_top_n=int(getattr(args, "cutoff_top_n", 40)),
            cutoff_prob=float(getattr(args, "cutoff_prob", 1.0)),
            beam_width=int(getattr(args, "beam_width", 100)),
            num_processes=int(getattr(args, "num_processes", 4)),
            blank_id=int(getattr(args, "blank_id", 0)),
            log_probs_input=True,  # this decoder is fed log-softmax probabilities
        )
        self.state = DecoderState(self.decoder)

    def generate(self, models, sample, eos=False, **kwargs):
        encoder_input = {
            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
        }
        emissions = self.get_emissions(models, encoder_input)
        # log-softmax, since the decoder was built with log_probs_input=True
        emissions = torch.nn.functional.softmax(emissions, dim=2).log()
        return self.decode(emissions, eos)
        # Alternative dispatch, kept for reference:
        # if hasattr(kwargs, 'inference'):
        #     return self.inference(emissions, str2bool(getattr(kwargs, 'inference')))
        # else:
        #     return self.decode(emissions)

    def get_emissions(self, models, encoder_input):
        model = models[0]
        encoder_out = model(**encoder_input)
        emissions = (
            model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
        )
        return emissions

    def convert_to_string(self, tokens, vocab, seq_len):
        return "".join([vocab[x] for x in tokens[0:seq_len]])

    def decode(self, emissions, eos=False):
        B, T, N = emissions.size()
        is_eos_s = [eos for _ in range(B)]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions, [self.state], is_eos_s
        )
        # Chunked-decoding variant, kept for reference:
        # n = 16000 * 2
        # emissions_list = [emissions[:, i:i + n] for i in range(0, len(emissions), n)]
        # result = ''
        # for idx, _em in enumerate(emissions_list):
        #     if idx == len(emissions_list) - 1:
        #         is_eos_s = [True for _ in range(len(emissions))]
        #     beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
        #         emissions, [self.state], is_eos_s
        #     )
        #     result += self.convert_to_string(
        #         beam_results[0][0], self.tgt_dict, out_lens[0][0]
        #     ).replace("|", " ")
        # return result
        if len(beam_results[0]) == 0:
            return ""
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
        # Unreachable hypothesis-style output, kept for reference:
        # return [
        #     [{
        #         "tokens": self.get_tokens(beam_results[idx][0][:out_lens[idx][0]]),
        #         "score": beam_scores[idx],
        #     }]
        #     for idx in range(len(beam_results))
        # ]

    def inference(self, emissions, end):
        # Same fixed 50-frame window as W2lParlance.decode.
        emissions = emissions[:, -75:-25, :]
        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(
            emissions, [self.state], [end]
        )
        if len(beam_results[0]) == 0:
            return ""
        return self.convert_to_string(
            beam_results[0][0], self.tgt_dict, out_lens[0][0]
        ).replace("|", " ")
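

# Illustrative streaming sketch (not part of the original module): feed an
# utterance to W2lParlanceOnlineDecoder chunk by chunk. The decoder's internal
# DecoderState carries the beam between calls, and eos=True on the final chunk
# flushes the hypothesis. `args`, `models`, `chunked_samples`, and `tgt_dict`
# are assumed to come from the usual fairseq loading code (illustrative names).
def _parlance_streaming_example(args, models, chunked_samples, tgt_dict):
    decoder = W2lParlanceOnlineDecoder(args, tgt_dict)
    text = ""
    for i, sample in enumerate(chunked_samples):
        is_last = i == len(chunked_samples) - 1
        text = decoder.generate(models, sample, eos=is_last)
    return text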