Browse source

online decoder

ryu committed 2 years ago
commit
53be7e69c4
7 files changed, 1196 additions and 0 deletions
  1. client/qt.py (+95, −0)
  2. client/record.py (+100, −0)
  3. client/rest.py (+35, −0)
  4. decoder/__init__.py (+0, −0)
  5. decoder/w2l_parlance.py (+183, −0)
  6. inference.py (+670, −0)
  7. server.py (+113, −0)

+ 95 - 0
client/qt.py

@@ -0,0 +1,95 @@
+import sys
+from PyQt5.QtWidgets import *
+from PyQt5.QtGui import QIcon
+from PyQt5.QtCore import *
+
+import time
+
+from record import Record
+
+
+class RecordWorker(QThread):
+    textB = pyqtSignal(str)
+
+    def __init__(self):
+        super().__init__()
+        self.power = False
+        self.text = ''
+        self.record = Record()
+    
+    def run(self):
+        self.power = True
+
+        # self.textB.emit("start !!\n")
+        while self.power:
+            self.record.record_unit(self.textB.emit)
+
+        self.record.record_unit(self.textB.emit)
+        self.textB.emit("\nend !!\n")
+
+    def stop(self):
+        self.power = False
+
+
+# class MyApp(QMainWindow):
+class MyApp(QWidget):
+    def __init__(self):
+        super().__init__()
+        self.text = ''
+        self.blank_cnt = 0
+        self.blank_max = 2
+
+        self.initUI()
+
+        self.t1 = RecordWorker()
+        self.t1.textB.connect(self.set_tb)  ## register the signal-slot connection
+
+        self.btn1.clicked.connect(self.thread_start)
+        self.btn2.clicked.connect(self.thread_stop)
+
+    def initUI(self):
+        self.setWindowTitle('BTS')
+        self.setWindowIcon(QIcon('labs_icon.jpg'))
+
+        self.btn1 = QPushButton('Run', self)
+
+        self.btn2 = QPushButton('Stop', self)
+
+        self.tb = QTextEdit()
+        self.tb.setReadOnly(True)
+        self.tb.setAcceptRichText(True)
+
+
+        self.vbox = QVBoxLayout()
+        self.vbox.addWidget(self.btn1, 0)
+        self.vbox.addWidget(self.btn2, 1)
+        self.vbox.addWidget(self.tb, 2)
+
+        self.setLayout(self.vbox)
+
+        self.setGeometry(300, 300, 800, 500)
+        self.show()
+
+    def thread_start(self):
+        self.t1.start()
+
+    def thread_stop(self):
+        self.t1.stop()
+
+    @pyqtSlot(str)
+    def set_tb(self, content):
+        if len(content) == 0 and self.text and self.text[-1] != "\n":
+            self.blank_cnt += 1
+            if self.blank_cnt >= self.blank_max:
+                self.blank_cnt = 0
+                self.text += '\n'
+                self.tb.setText(self.text)
+        else:
+            self.text += content
+            self.tb.setText(self.text)
+    
+
+if __name__ == '__main__':
+    app = QApplication(sys.argv)
+    ex = MyApp()
+    sys.exit(app.exec_())

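For orientation (not part of this commit): a minimal sketch of the QThread/pyqtSignal pattern client/qt.py builds on. The worker emits text from its own thread and Qt queues the call onto the GUI-thread slot; the class and slot names here are illustrative only.

import sys
from PyQt5.QtCore import QThread, pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QApplication, QTextEdit

class DemoWorker(QThread):
    textB = pyqtSignal(str)              # emitted from the worker thread

    def run(self):
        self.textB.emit("hello from the worker")

class DemoWindow(QTextEdit):
    @pyqtSlot(str)
    def append_text(self, content):      # delivered on the GUI thread
        self.append(content)

if __name__ == '__main__':
    app = QApplication(sys.argv)
    win = DemoWindow()
    win.show()
    worker = DemoWorker()
    worker.textB.connect(win.append_text)
    worker.start()
    sys.exit(app.exec_())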
+ 100 - 0
client/record.py

@@ -0,0 +1,100 @@
+import pyaudio
+from collections import deque
+from array import array
+import numpy as np
+import requests
+import json
+import itertools
+
+from config.config import Config
+
+# fs = 16000
+# seconds = 1
+SERVER_URL = Config["server_url"]
+INFERENCE_STR = Config["inference_path"]
+TEST_STR = Config["test_path"]
+
+
+class DequeEncoder(json.JSONEncoder):
+    def default(self, obj):
+       if isinstance(obj, deque):
+          return list(obj)
+       return json.JSONEncoder.default(self, obj)
+
+
+class Record(object):
+    fs = 16000
+    seconds = 1
+    stride = fs * seconds
+    cnt = 0
+    def __init__(self):
+        self.cumul_audio = deque([], maxlen = self.fs * 5)
+        self.stream = pyaudio.PyAudio().open(format=pyaudio.paInt16, channels=1, rate=self.fs, input=True, output=True, frames_per_buffer=self.stride)
+
+    def unit_test(self, output_func):
+        self.cnt += 1
+        output_func(str(self.cnt))
+
+    def record_unit(self, output_func):
+        snd_raw = self.stream.read(self.stride, exception_on_overflow=False)
+        snd_data = array('h', snd_raw)
+
+        audio = np.float32(snd_data)/32000
+        audio = audio.tolist()
+
+        self.cumul_audio.extend(audio)
+
+        payload = {
+            "device":"0005",
+            "time":"seconds",
+            "recording":{
+                    "filename":"output.mp3",
+                    "content_type":"audio/mp3",
+                    "content": list(self.cumul_audio)
+                }
+        }
+
+        headers = {'Content-Type': 'application/json; charset=utf-8'}
+        r = requests.post(SERVER_URL + INFERENCE_STR, data=json.dumps(payload), headers=headers)
+        resJson = r.json()
+        print('resJson = ', resJson)
+        output_func(resJson['output'])
+
+
+
+# NOTE: the helpers below predate the Record class and still reference
+# module-level globals (stream, stride, cumul_audio) that are no longer
+# defined in this file.
+def recording(output):
+    while True:
+        snd_raw = stream.read(stride, exception_on_overflow=False)
+        snd_data = array('h', snd_raw)
+        
+        audio = np.float32(snd_data)/32000
+        audio = audio.tolist()
+
+        cumul_audio.extend(audio)
+
+        payload = {
+            "device":"0005",
+            "time":"seconds",
+            "recording":{
+                "filename":"output.mp3",
+                "content_type":"audio/mp3",
+                "content": cumul_audio
+                }
+        }
+
+        headers = {'Content-Type': 'application/json; charset=utf-8'}
+
+        r = requests.post(SERVER_URL + TEST_STR, data=json.dumps(payload), headers=headers)
+        # print('success recording !! ', list(itertools.islice(cumul_audio, 0, 10)))
+        
+
+def record_chunk():
+    snd_raw = stream.read(stride, exception_on_overflow=False)
+    snd_data = array('h', snd_raw)
+    
+    audio = np.float32(snd_data)/32000
+    audio = audio.tolist()
+
+    cumul_audio.extend(audio)
+
+    return list(itertools.islice(cumul_audio, 0, 10))
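For context (not part of this commit): the JSON body that record_unit posts matches the AudioClip model declared in server.py. A minimal standalone sketch of the same request, assuming the server listens on http://localhost:8000 (the real URL comes from config/config.py, which is not included in this commit):

import json
import numpy as np
import requests

SERVER_URL = "http://localhost:8000"   # assumption; normally Config["server_url"]

# one second of 16 kHz silence, sent as a plain float list like the client does
audio = np.zeros(16000, dtype=np.float32).tolist()

payload = {
    "device": "0005",
    "time": "seconds",
    "recording": {
        "filename": "output.mp3",
        "content_type": "audio/mp3",
        "content": audio,
    },
}
headers = {"Content-Type": "application/json; charset=utf-8"}
r = requests.post(SERVER_URL + "/inference", data=json.dumps(payload), headers=headers)
print(r.json()["output"])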

+ 35 - 0
client/rest.py

@@ -0,0 +1,35 @@
+import json
+import requests, pickle
+import sounddevice as sd
+from config import config
+
+fs = 16000  # Sample rate
+seconds = 5  # Duration of recording
+SERVER_URL = config.CLIENT_CONFIG['server_url']
+INFERENCE_STR = config.CLIENT_CONFIG['inference_url']
+TEST_STR = config.CLIENT_CONFIG['test_url']
+
+## Record Part
+print("start Recording")
+audio = sd.rec(int(seconds * fs), samplerate=fs, channels=1)
+sd.wait()
+
+audio = audio.tolist()
+# audio = pickle.dumps(audio).decode('utf8')
+print("end Recording", type(audio), type(audio[0]), type(audio[0][0]))
+
+payload = {
+    "device":"0005",
+    "time":"seconds",
+    "recording":{
+        "filename":"output.mp3",
+        "content_type":"audio/mp3",
+        "content": audio
+    }
+}
+headers = {'Content-Type': 'application/json; charset=utf-8'}
+
+r = requests.post(SERVER_URL + INFERENCE_STR, data=json.dumps(payload), headers=headers)
+# r = requests.post(SERVER_URL + TEST_STR, data=payload, headers=headers)
+resJson = r.json()
+print(resJson)

+ 0 - 0
decoder/__init__.py


+ 183 - 0
decoder/w2l_parlance.py

@@ -0,0 +1,183 @@
+import argparse
+
+import torch
+from ctcdecode import CTCBeamDecoder, OnlineCTCBeamDecoder, DecoderState
+
+from examples.speech_recognition.w2l_decoder import W2lDecoder
+
+class W2lParlance(object):
+    def __init__(self, args, tgt_dict):
+        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
+        lm_path = "/root/kakao3.arpa"
+        alpha = 5
+        print('lm path, alpha = ', lm_path, alpha)
+        self.decoder = CTCBeamDecoder (
+            self.tgt_dict,
+            model_path = lm_path,
+            alpha = alpha,
+            beta = 10,
+            # beta = float(getattr(args, "beta", 0 )),
+            cutoff_top_n = int(getattr(args, "cutoff_top_n", 40 )),
+            cutoff_prob = float(getattr(args, "cutoff_prob", 1.0 )),
+            beam_width = int(getattr(args, "beam_width", 100 )),
+            num_processes = int(getattr(args, "num_processes", 4 )),
+            blank_id = int(getattr(args, "blank_id", 0 )),
+            log_probs_input=False
+        )
+
+    def generate(self, models, sample, **unused):
+        """Generate a batch of inferences."""
+        # model.forward normally channels prev_output_tokens into the decoder
+        # separately, but SequenceGenerator directly calls model.encoder
+
+        encoder_input = {
+            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
+        }
+
+        emissions = self.get_emissions(models, encoder_input)
+        emissions_softmax = torch.nn.functional.softmax(emissions, dim=2)
+        return self.decode(emissions_softmax)
+    
+    def get_emissions(self, models, encoder_input):
+        """Run encoder and normalize emissions"""
+
+        model = models[0]
+        encoder_out = model(**encoder_input)
+        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
+
+        return emissions
+
+    def convert_to_string(self, tokens, vocab, seq_len):
+        return "".join([vocab[x] for x in tokens[0:seq_len]])
+    
+    def decode(self, emissions_softmax):
+        emissions_softmax = emissions_softmax[:,-75:-25,:]
+
+        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions_softmax)
+        
+        ## decoding option 1
+        if len(beam_results[0]) == 0:
+            return ''
+        output_str = self.convert_to_string(beam_results[0][0], self.tgt_dict, out_lens[0][0]).replace("|", " ")
+
+        # print('output str = ', output_str)
+        # if isinstance(output_str, list):
+        #     return output_str[0]
+        ## decoding option 2
+        # tmp = [{'tokens': beam_results[0][0][:out_lens[0][0]]}]
+        # def decode_opt( args, hypos, tgt_dict ):
+        #     for hypo in hypos[: min(len(hypos), args.nbest)]:
+        #         hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
+
+        #         if "words" in hypo:
+        #             hyp_words = " ".join(hypo["words"])
+        #         else:
+        #             hyp_words = post_process(hyp_pieces, args.post_process)
+
+        #         return hyp_words
+        # output_str2 = decode_opt( args, tmp, tgt_dict )
+
+        return output_str
+    
+def str2bool(v):
+    if isinstance(v, bool):
+        return v
+    if v.lower() in ('yes', 'true', 't', 'y', '1'):
+        return True
+    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+        return False
+    else:
+        raise argparse.ArgumentTypeError('Boolean value expected.')
+
+class W2lParlanceOnlineDecoder(W2lDecoder):
+    def __init__(self, args, tgt_dict):
+        super().__init__(args, tgt_dict)
+
+        print(args.kenlm_model, args.lm_weight)
+        print(getattr(args, "kenlm_model", None), float(getattr(args, "lm_weight", 0.5 )) )
+
+        self.tgt_dict = [k for k in tgt_dict.indices.keys()]
+        self.decoder = OnlineCTCBeamDecoder (
+            self.tgt_dict,
+            model_path = getattr(args, "kenlm_model", None),
+            alpha = float(getattr(args, "lm_weight", 0.5 )),
+            beta = float(getattr(args, "beta", 10 )),
+            cutoff_top_n = int(getattr(args, "cutoff_top_n", 40 )),
+            cutoff_prob = float(getattr(args, "cutoff_prob", 1.0 )),
+            beam_width = int(getattr(args, "beam_width", 100 )),
+            num_processes = int(getattr(args, "num_processes", 4 )),
+            blank_id = int(getattr(args, "blank_id", 0 )),
+            log_probs_input=True
+        )
+
+        self.state = DecoderState(self.decoder)
+
+    def generate(self, models, sample, eos=False, **kwargs):
+        encoder_input = {
+            k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens"
+        }
+
+        emissions = self.get_emissions(models, encoder_input)
+        emissions = torch.nn.functional.softmax(emissions, dim=2).log()
+        return self.decode(emissions, eos)
+
+        # if hasattr(kwargs, 'inference'):
+        #     return self.inference(emissions_softmax, str2bool(getattr(kwargs, 'inference')))
+        # else:
+        #     return self.decode(emissions_softmax)
+    
+    def get_emissions(self, models, encoder_input):
+        model = models[0]
+        encoder_out = model(**encoder_input)
+        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
+
+        return emissions
+
+    def convert_to_string(self, tokens, vocab, seq_len):
+        return "".join([vocab[x] for x in tokens[0:seq_len]])
+    
+    def decode(self, emissions, eos=False):
+
+        B, T, N = emissions.size()
+
+        is_eos_s = [eos for _ in range(len(emissions))]
+
+        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], is_eos_s)
+
+        # n = 16000 * 2
+        # emissions_list = [emissions[:, i:i+n] for i in range(0, len(emissions), n)]
+        # result = ''
+        
+        
+        # for idx, _em in enumerate(emissions_list):
+
+        #     if idx == len(emissions_list) -1 :
+        #         is_eos_s = [True for _ in range(len(emissions))] 
+        #     beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], is_eos_s)
+        #     result += self.convert_to_string(beam_results[0][0], self.tgt_dict, out_lens[0][0]).replace("|", " ")
+
+        # return result
+
+
+        # beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], [False])
+        if len(beam_results[0]) == 0:
+            return ''
+        return self.convert_to_string(beam_results[0][0], self.tgt_dict, out_lens[0][0]).replace("|", " ")
+
+        # Unreachable alternative return format (per-batch token/score dicts),
+        # kept for reference:
+        # return [
+        #     [{
+        #         "tokens" : self.get_tokens(beam_results[idx][0][:out_lens[idx][0]]),
+        #         "score" : beam_scores[idx]
+        #     }]
+        #     for idx in range(len(beam_results))
+        # ]
+
+    def inference(self, emissions, end):
+        print(len(emissions))
+        emissions = emissions[:,-75:-25,:]
+        beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions, [self.state], [end])
+
+        if len(beam_results[0]) == 0:
+            print('check!!!')
+            return ''
+
+        return self.convert_to_string(beam_results[0][0], self.tgt_dict, out_lens[0][0]).replace("|", " ")
+        
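For reference (not part of this commit): the streaming loop that W2lParlanceOnlineDecoder wraps reduces to the ctcdecode calls used above, i.e. one OnlineCTCBeamDecoder plus a DecoderState threaded through successive decode() calls, with the end-of-stream flag set on the last chunk. A minimal sketch with a placeholder vocabulary and random log-probabilities:

import torch
from ctcdecode import OnlineCTCBeamDecoder, DecoderState

labels = ["_", "|", "a", "b", "c"]      # placeholder vocabulary; index 0 is the CTC blank
decoder = OnlineCTCBeamDecoder(
    labels,
    beam_width=100,
    num_processes=4,
    blank_id=0,
    log_probs_input=True,
)
state = DecoderState(decoder)           # carries beam hypotheses across chunks

chunks = [torch.randn(1, 50, len(labels)).log_softmax(dim=2) for _ in range(3)]
text = ""
for i, chunk in enumerate(chunks):
    is_last = (i == len(chunks) - 1)
    beam_results, beam_scores, timesteps, out_lens = decoder.decode(chunk, [state], [is_last])
    best = beam_results[0][0][: out_lens[0][0]]
    text = "".join(labels[t] for t in best).replace("|", " ")
print(text)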

+ 670 - 0
inference.py

@@ -0,0 +1,670 @@
+import ast
+import logging
+import math
+import os
+import sys
+
+import editdistance
+import numpy as np
+import torch
+from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
+from fairseq.data.data_utils import post_process
+from fairseq.logging.meters import StopwatchMeter, TimeMeter
+
+import soundfile as sf
+import torch.nn.functional as F
+
+from ctcdecode import CTCBeamDecoder
+
+logging.basicConfig()
+logging.root.setLevel(logging.INFO)
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+def add_asr_eval_argument(parser):
+    parser.add_argument("--kspmodel", default=None, help="sentence piece model")
+    parser.add_argument(
+        "--wfstlm", default=None, help="wfstlm on dictonary output units"
+    )
+    parser.add_argument(
+        "--rnnt_decoding_type",
+        default="greedy",
+        help="wfstlm on dictionary output units",
+    )
+    try:
+        parser.add_argument(
+            "--lm-weight",
+            "--lm_weight",
+            type=float,
+            default=0.2,
+            help="weight for lm while interpolating with neural score",
+        )
+    except:
+        pass
+    parser.add_argument(
+        "--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
+    )
+    parser.add_argument(
+        "--w2l-decoder",
+        choices=["viterbi", "kenlm", "fairseqlm", "parlance", "online"],
+        help="use a w2l decoder",
+    )
+    parser.add_argument("--lexicon", help="lexicon for w2l decoder")
+    parser.add_argument("--unit-lm", action="store_true", help="if using a unit lm")
+    parser.add_argument("--kenlm-model", "--lm-model", help="lm model for w2l decoder")
+    parser.add_argument("--beam-threshold", type=float, default=25.0)
+    parser.add_argument("--beam-size-token", type=float, default=100)
+    parser.add_argument("--word-score", type=float, default=1.0)
+    parser.add_argument("--unk-weight", type=float, default=-math.inf)
+    parser.add_argument("--sil-weight", type=float, default=0.0)
+    parser.add_argument(
+        "--dump-emissions",
+        type=str,
+        default=None,
+        help="if present, dumps emissions into this file and exits",
+    )
+    parser.add_argument(
+        "--dump-features",
+        type=str,
+        default=None,
+        help="if present, dumps features into this file and exits",
+    )
+    parser.add_argument(
+        "--load-emissions",
+        type=str,
+        default=None,
+        help="if present, loads emissions from this file",
+    )
+    return parser
+
+
+def check_args(args):
+    # assert args.path is not None, "--path required for generation!"
+    # assert args.results_path is not None, "--results_path required for generation!"
+    assert (
+        not args.sampling or args.nbest == args.beam
+    ), "--sampling requires --nbest to be equal to --beam"
+    assert (
+        args.replace_unk is None or args.raw_text
+    ), "--replace-unk requires a raw text dataset (--raw-text)"
+
+def get_dataset_itr(args, task, models):
+    return task.get_batch_iterator(
+        dataset=task.dataset(args.gen_subset),
+        max_tokens=args.max_tokens,
+        max_sentences=args.batch_size,
+        max_positions=(sys.maxsize, sys.maxsize),
+        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
+        required_batch_size_multiple=args.required_batch_size_multiple,
+        num_shards=args.num_shards,
+        shard_id=args.shard_id,
+        num_workers=args.num_workers,
+        data_buffer_size=args.data_buffer_size,
+    ).next_epoch_itr(shuffle=False)
+
+def decode( args, hypos, tgt_dict ):
+    for hypo in hypos[: min(len(hypos), args.nbest)]:
+        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
+
+        if "words" in hypo:
+            hyp_words = " ".join(hypo["words"])
+        else:
+            hyp_words = post_process(hyp_pieces, args.post_process)
+
+        return hyp_words
+
+def process_predictions(
+    args, hypos, sp, tgt_dict, target_tokens, res_files, speaker, id
+):
+    for hypo in hypos[: min(len(hypos), args.nbest)]:
+        hyp_pieces = tgt_dict.string(hypo["tokens"].int().cpu())
+
+        if "words" in hypo:
+            hyp_words = " ".join(hypo["words"])
+        else:
+            hyp_words = post_process(hyp_pieces, args.post_process)
+
+        if res_files is not None:
+            print(
+                "{} ({}-{})".format(hyp_pieces, speaker, id),
+                file=res_files["hypo.units"],
+            )
+            print(
+                "{} ({}-{})".format(hyp_words, speaker, id),
+                file=res_files["hypo.words"],
+            )
+
+        tgt_pieces = tgt_dict.string(target_tokens)
+        tgt_words = post_process(tgt_pieces, args.post_process)
+
+        if res_files is not None:
+            print(
+                "{} ({}-{})".format(tgt_pieces, speaker, id),
+                file=res_files["ref.units"],
+            )
+            print(
+                "{} ({}-{})".format(tgt_words, speaker, id), file=res_files["ref.words"]
+            )
+
+        if not args.quiet:
+            logger.info("HYPO:" + hyp_words)
+            logger.info("TARGET:" + tgt_words)
+            logger.info("___________________")
+
+        hyp_words = hyp_words.split()
+        tgt_words = tgt_words.split()
+        return editdistance.eval(hyp_words, tgt_words), len(tgt_words)
+
+def prepare_result_files(args):
+    def get_res_file(file_prefix):
+        if args.num_shards > 1:
+            file_prefix = f"{args.shard_id}_{file_prefix}"
+        path = os.path.join(
+            args.results_path,
+            "{}-{}-{}.txt".format(
+                file_prefix, os.path.basename(args.path), args.gen_subset
+            ),
+        )
+        return open(path, "w", buffering=1)
+
+    if not args.results_path:
+        return None
+
+    return {
+        "hypo.words": get_res_file("hypo.word"),
+        "hypo.units": get_res_file("hypo.units"),
+        "ref.words": get_res_file("ref.word"),
+        "ref.units": get_res_file("ref.units"),
+    }
+
+def optimize_models(args, use_cuda, models):
+    """Optimize ensemble for generation"""
+    for model in models:
+        model.make_generation_fast_(
+            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
+            need_attn=args.print_alignment,
+        )
+        if args.fp16:
+            model.half()
+        if use_cuda:
+            model.cuda()
+
+def apply_half(t):
+    if t.dtype is torch.float32:
+        return t.to(dtype=torch.half)
+    return t
+
+def get_feature_to_path(filepath):
+    wav, sample_rate = sf.read(filepath)
+    feats = torch.from_numpy(wav).float()
+    feats = feature_postprocess(feats)
+
+    return feats
+
+def get_feature(wav):
+    audio = np.array(wav).squeeze()
+    feats = torch.from_numpy(audio).float()
+    feats = feature_postprocess(feats)
+
+    return feats
+
+def feature_postprocess(feats):
+    if feats.dim() == 2:
+        feats = feats.mean(-1)
+    
+    assert feats.dim() == 1, feats.dim()
+
+    with torch.no_grad():
+        feats = F.layer_norm(feats, feats.shape)
+    return feats
+
+def convert_to_string(tokens, vocab, seq_len):
+    return "".join([vocab[x] for x in tokens[0:seq_len]])
+
+def main(args, task=None, model_state=None):
+    check_args(args)
+
+    use_fp16 = args.fp16
+    if args.max_tokens is None and args.batch_size is None:
+        args.max_tokens = 4000000
+    logger.info(args)
+
+    use_cuda = torch.cuda.is_available() and not args.cpu
+
+    logger.info("| decoding with criterion {}".format(args.criterion))
+
+    task = tasks.setup_task(args)
+
+    # Load ensemble
+    if args.load_emissions:
+        models, criterions = [], []
+        task.load_dataset(args.gen_subset)
+    else:
+        logger.info("| loading model(s) from {}".format(args.path))
+        models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+            utils.split_paths(args.path, separator="\\"),
+            arg_overrides=ast.literal_eval(args.model_overrides),
+            task=task,
+            suffix=args.checkpoint_suffix,
+            strict=(args.checkpoint_shard_count == 1),
+            num_shards=args.checkpoint_shard_count,
+            state=model_state,
+        )
+        optimize_models(args, use_cuda, models)
+        task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
+
+
+    # Set dictionary
+    tgt_dict = task.target_dictionary
+
+    logger.info(f"| | tgt_dict = {tgt_dict.indices}")
+    labels = [k for k in tgt_dict.indices.keys()]
+    # for k, v in tgt_dict.indices.items():
+    #     labels.append()
+
+    logger.info(
+        "| {} {} {} examples".format(
+            args.data, args.gen_subset, len(task.dataset(args.gen_subset))
+        )
+    )
+
+    # hack to pass transitions to W2lDecoder
+    if args.criterion == "asg_loss":
+        raise NotImplementedError("asg_loss is currently not supported")
+        # trans = criterions[0].asg.trans.data
+        # args.asg_transitions = torch.flatten(trans).tolist()
+
+    # Load dataset (possibly sharded)
+    itr = get_dataset_itr(args, task, models)
+
+    # Initialize generator
+    # gen_timer = StopwatchMeter()
+
+    def build_generator(args):
+        w2l_decoder = getattr(args, "w2l_decoder", None)
+        if w2l_decoder == "viterbi":
+            from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
+
+            return W2lViterbiDecoder(args, task.target_dictionary)
+        elif w2l_decoder == "kenlm":
+            from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
+
+            return W2lKenLMDecoder(args, task.target_dictionary)
+        elif w2l_decoder == "fairseqlm":
+            from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
+
+            return W2lFairseqLMDecoder(args, task.target_dictionary)
+        else:
+            print(
+                "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
+            )
+
+    # please do not touch this unless you test both generate.py and infer.py with audio_pretraining task
+    generator = build_generator(args)
+
+    if args.load_emissions:
+        generator = ExistingEmissionsDecoder(
+            generator, np.load(args.load_emissions, allow_pickle=True)
+        )
+        logger.info("loaded emissions from " + args.load_emissions)
+
+    num_sentences = 0
+
+    if args.results_path is not None and not os.path.exists(args.results_path):
+        os.makedirs(args.results_path)
+
+    max_source_pos = (
+        utils.resolve_max_positions(
+            task.max_positions(), *[model.max_positions() for model in models]
+        ),
+    )
+
+    if max_source_pos is not None:
+        max_source_pos = max_source_pos[0]
+        if max_source_pos is not None:
+            max_source_pos = max_source_pos[0] - 1
+
+    if args.dump_emissions:
+        emissions = {}
+    if args.dump_features:
+        features = {}
+        models[0].bert.proj = None
+    else:
+        res_files = prepare_result_files(args)
+    errs_t = 0
+    lengths_t = 0
+
+    ################ test code ################
+    _sample = dict()
+    _net_input = dict()
+
+    _cuda = 'cpu'
+
+    feature = get_feature_to_path("/root/mnt/data/kspon_ori/ogg/fork/KsponSpeech_01/KsponSpeech_0001/KsponSpeech_000002.ogg")
+    _net_input["source"] = feature.unsqueeze(0).to(_cuda)
+    padding_mask = torch.BoolTensor(_net_input["source"].size(1)).fill_(False).unsqueeze(0).to(_cuda)
+
+    _net_input["padding_mask"] = padding_mask
+    _sample["net_input"] = _net_input
+
+    ## model cuda change
+    models[0].to(_cuda)
+
+    decoder = CTCBeamDecoder(
+        labels,
+        model_path=None,
+        alpha=0,
+        beta=0,
+        cutoff_top_n=40,
+        cutoff_prob=1.0,
+        beam_width=100,
+        num_processes=4,
+        blank_id=0,
+        log_probs_input=False
+    )
+
+    
+    with torch.no_grad():
+        encoder_input = {
+            k: v for k, v in _sample["net_input"].items() if k != "prev_output_tokens"
+        }
+        model = models[0]
+        encoder_out = model(**encoder_input)
+        emissions = model.get_logits(encoder_out).transpose(0, 1).float().cpu().contiguous()
+        logger.info(f"| | emissions = {emissions}, {emissions.shape}")
+
+        sft = torch.nn.functional.softmax(emissions, dim=2)
+        logger.info(f"| | sft = {sft.shape}, {sft[0][0].sum()}")
+        beam_results, beam_scores, timesteps, out_lens = decoder.decode(sft)
+        output_str = convert_to_string(beam_results[0][0], labels, out_lens[0][0]).replace("|", " ")
+
+        tmp = [{'tokens': beam_results[0][0][:out_lens[0][0]]}]
+        output_str2 = decode( args, tmp, tgt_dict )
+
+    prefix_tokens = None
+    hypos = task.inference_step(generator, models, _sample, prefix_tokens)
+    logger.info(f"| | hypos = {hypos}")
+    hypos = decode( args, hypos[0], tgt_dict )
+    logger.info(f"| | hypos _ decoding = {hypos}")
+
+    # beam_results, beam_scores, timesteps, out_lens = decoder.decode(torch.tensor(hypos))
+
+
+    ################ test code ################
+
+    
+    with progress_bar.build_progress_bar(args, itr) as t:
+        wps_meter = TimeMeter()
+        # logger.info(f"| | in progress_bar = {t}")
+
+        for sample in t:
+            logger.info(f"| | in progress_bar | sample = {sample}")
+
+            sample = utils.move_to_cuda(sample) if use_cuda else sample
+            if use_fp16:
+                sample = utils.apply_to_sample(apply_half, sample)
+            if "net_input" not in sample:
+                continue
+
+            prefix_tokens = None
+            if args.prefix_size > 0:
+                prefix_tokens = sample["target"][:, : args.prefix_size]
+
+            # gen_timer.start()
+            # if args.dump_emissions:
+            #     with torch.no_grad():
+            #         encoder_out = models[0](**sample["net_input"])
+            #         emm = models[0].get_normalized_probs(encoder_out, log_probs=True)
+            #         emm = emm.transpose(0, 1).cpu().numpy()
+            #         for i, id in enumerate(sample["id"]):
+            #             emissions[id.item()] = emm[i]
+            #         continue
+            # elif args.dump_features:
+            #     with torch.no_grad():
+            #         encoder_out = models[0](**sample["net_input"])
+            #         feat = encoder_out["encoder_out"].transpose(0, 1).cpu().numpy()
+            #         for i, id in enumerate(sample["id"]):
+            #             padding = (
+            #                 encoder_out["encoder_padding_mask"][i].cpu().numpy()
+            #                 if encoder_out["encoder_padding_mask"] is not None
+            #                 else None
+            #             )
+            #             features[id.item()] = (feat[i], padding)
+            #         continue
+            hypos = task.inference_step(generator, models, sample, prefix_tokens)
+            print("hypos = ", hypos)
+            print(f"tgt_dict = {tgt_dict}")
+            exit()  # debug: stop after inspecting the first batch
+            num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
+
+            for i, sample_id in enumerate(sample["id"].tolist()):
+                speaker = None
+                # id = task.dataset(args.gen_subset).ids[int(sample_id)]
+                id = sample_id
+                toks = (
+                    sample["target"][i, :]
+                    if "target_label" not in sample
+                    else sample["target_label"][i, :]
+                )
+                target_tokens = utils.strip_pad(toks, tgt_dict.pad()).int().cpu()
+                # Process top predictions
+                hypos = process_predictions(
+                    args,
+                    hypos[i],
+                    None,
+                    tgt_dict,
+                    target_tokens,
+                    res_files,
+                    speaker,
+                    id,
+                )
+
+            wps_meter.update(num_generated_tokens)
+            t.log({"wps": round(wps_meter.avg)})
+            num_sentences += (
+                sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
+            )
+
+    wer = None
+    if args.dump_emissions:
+        emm_arr = []
+        for i in range(len(emissions)):
+            emm_arr.append(emissions[i])
+        np.save(args.dump_emissions, emm_arr)
+        logger.info(f"saved {len(emissions)} emissions to {args.dump_emissions}")
+    elif args.dump_features:
+        feat_arr = []
+        for i in range(len(features)):
+            feat_arr.append(features[i])
+        np.save(args.dump_features, feat_arr)
+        logger.info(f"saved {len(features)} emissions to {args.dump_features}")
+    else:
+        if lengths_t > 0:
+            wer = errs_t * 100.0 / lengths_t
+            logger.info(f"WER: {wer}")
+
+        # logger.info(
+        #     "| Processed {} sentences ({} tokens) in {:.1f}s ({:.2f}"
+        #     "sentences/s, {:.2f} tokens/s)".format(
+        #         num_sentences,
+        #         gen_timer.n,
+        #         gen_timer.sum,
+        #         num_sentences / gen_timer.sum,
+        #         1.0 / gen_timer.avg,
+        #     )
+        # )
+        # logger.info("| Generate {} with beam={}".format(args.gen_subset, args.beam))
+    
+    return task, wer
+
+def make_parser():
+    parser = options.get_generation_parser()
+    parser = add_asr_eval_argument(parser)
+    return parser
+
+def cli_main():
+    parser = make_parser()
+    args = options.parse_args_and_arch(parser)
+    main(args)
+    exit()
+
+def build_generator(args):
+    w2l_decoder = getattr(args, "w2l_decoder", None)
+    if w2l_decoder == "viterbi":
+        from examples.speech_recognition.w2l_decoder import W2lViterbiDecoder
+
+        return W2lViterbiDecoder(args, task.target_dictionary)
+    elif w2l_decoder == "kenlm":
+        from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder
+
+        return W2lKenLMDecoder(args, task.target_dictionary)
+    elif w2l_decoder == "fairseqlm":
+        from examples.speech_recognition.w2l_decoder import W2lFairseqLMDecoder
+
+        return W2lFairseqLMDecoder(args, task.target_dictionary)
+    elif w2l_decoder == "parlance":
+        from decoder.w2l_parlance import W2lParlance
+
+        return W2lParlance(args, task.target_dictionary)
+
+    elif w2l_decoder == "online":
+        from decoder.w2l_parlance import W2lParlanceOnlineDecoder
+
+        return W2lParlanceOnlineDecoder(args, task.target_dictionary)
+    else:
+        print(
+            "only viterbi, kenlm, fairseqlm, parlance and online decoders are supported at the moment"
+        )
+
+if __name__ == "__main__":
+    cli_main()
+    exit()
+
+## for parlance online decoder test
+sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest', 
+    '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/kaist_best.pt',
+    '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance', 
+    '--lm-model', '/root/project/speech_server/decoder/kakao3.bin', '--lm-weight', '2', '--word-score', '-1', 
+    '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr', 
+    '--max-tokens', '4000000', '--post-process', 'letter', '--cpu']
+
+parser = make_parser()
+args = options.parse_args_and_arch(parser)
+
+use_fp16 = args.fp16
+if args.max_tokens is None and args.batch_size is None:
+    args.max_tokens = 4000000
+logger.info(args)
+
+use_cuda = torch.cuda.is_available() and not args.cpu
+use_cuda_str = 'cuda' if use_cuda else 'cpu'
+
+logger.info("| decoding with criterion {}".format(args.criterion))
+
+task = tasks.setup_task(args)
+
+logger.info("| loading model(s) from {}".format(args.path))
+models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
+    utils.split_paths(args.path, separator="\\"),
+    arg_overrides=ast.literal_eval(args.model_overrides),
+    task=task,
+    suffix=args.checkpoint_suffix,
+    strict=(args.checkpoint_shard_count == 1),
+    num_shards=args.checkpoint_shard_count,
+    state=None,
+)
+## optimize
+optimize_models(args, use_cuda, models)
+task.load_dataset(args.gen_subset, task_cfg=saved_cfg.task)
+
+# Set dictionary
+tgt_dict = task.target_dictionary
+# labels = [k for k in tgt_dict.indices.keys()]
+
+logger.info(
+    "| {} {} {} examples".format(
+        args.data, args.gen_subset, len(task.dataset(args.gen_subset))
+    )
+)
+
+generator = build_generator(args)
+
+def list_chunk(lst, n):
+    return [lst[i:i+n] for i in range(0, len(lst), n)]
+
+
+def inference_online(audio):
+    sample = dict()
+    net_input = dict()
+
+    feature = get_feature(audio)
+
+    hypos_list = []
+    sec = 3
+    feature_list = list_chunk(feature, 16000 * sec) 
+    for idx, _feat in enumerate(feature_list):
+        net_input["source"] = _feat.unsqueeze(0).to(use_cuda_str)
+        padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
+
+        net_input["padding_mask"] = padding_mask
+        sample["net_input"] = net_input
+
+        models[0].to(use_cuda_str)
+
+        # hypos = task.inference_step(generator, models, sample, None)
+
+        eos = False
+        if idx == len(feature_list) - 1:
+            eos = True
+        
+        with torch.no_grad():
+            hypos = generator.generate(models, sample, eos=eos, prefix_tokens=None, constraints=None)
+        
+        hypos_list.append(hypos)
+    
+    logger.info(f"| | hypos_list => {hypos_list}")
+    return hypos_list[-1]
+
+
+def inference(audio):
+    logger.info("| in inference func !!")
+    sample = dict()
+    net_input = dict()
+
+    feature = get_feature(audio)
+
+    net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
+    logger.info(f'feature shape = {net_input["source"].shape}')
+    padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
+
+    net_input["padding_mask"] = padding_mask
+    sample["net_input"] = net_input
+
+    models[0].to(use_cuda_str)
+
+    hypos = task.inference_step(generator, models, sample, None)
+
+    logger.info(f"| | hypos => {hypos}")
+    
+    return hypos
+
+def inference_file():
+    sample = dict()
+    net_input = dict()
+
+    feature = get_feature_to_path('/root/nas/data/kspon_with_aug/ogg/pcm_wav/test/eval_clean/KsponSpeech_E00001.ogg')
+    
+    net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
+    padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
+
+    net_input["padding_mask"] = padding_mask
+    sample["net_input"] = net_input
+
+    models[0].to(use_cuda_str)
+
+    hypos = task.inference_step(generator, models, sample, None)
+
+    logger.info(f"| | hypos => {hypos}")
+    
+    return hypos
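Note (not part of this commit): besides the cli_main entry point, this module runs its model-loading block at import time using the hard-coded sys.argv above, so importing it (as server.py does) already builds task, models and generator. A minimal usage sketch under that assumption (the checkpoint and manifest paths in the argv block must exist):

import numpy as np
from inference import inference, inference_online

# five seconds of 16 kHz silence, in the same plain-list format the FastAPI
# endpoints forward from the JSON payload
audio = np.zeros(16000 * 5, dtype=np.float32).tolist()

hypos = inference(audio)            # whole-utterance decoding via task.inference_step
partial = inference_online(audio)   # chunked decoding in 3-second windows
print(hypos, partial)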

+ 113 - 0
server.py

@@ -0,0 +1,113 @@
+from typing import Union
+from fairseq import tasks
+from fairseq.data.dictionary import Dictionary
+from fastapi import FastAPI, File, UploadFile
+from pydantic import BaseModel
+
+import logging
+import argparse
+import torch
+import torch.nn as nn
+import pickle
+import soundfile as sf
+import torch.nn.functional as F
+import yaml
+import os, sys
+import numpy as np
+
+
+# from decoder_exps.decode_common import W2V2Decoder, Wav2VecCtc
+# from wav2vecEncoder import Wav2VecCtc as CustomWav2VecCtc
+# from fairseq.models.wav2vec.wav2vec2_asr import Wav2VecCtc
+
+from inference import get_feature, inference, inference_file, inference_online
+# from inference import inference_file
+
+logging.basicConfig()
+logging.root.setLevel(logging.INFO)
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger(__name__)
+
+class Recording(BaseModel):
+    filename: str
+    content_type: str
+    content: list
+
+class AudioClip(BaseModel):
+    device: str
+    time: str
+    recording: Recording
+
+
+# YAML_FILE = "config/base_org.yaml"
+
+# args = dict()
+# os.path.abspath(os.path.dirname(__file__))
+# with open(YAML_FILE, 'r') as f:
+#     args.update(yaml.safe_load(f))
+
+
+###############################################################################
+## FastAPI
+###############################################################################
+
+app = FastAPI()
+
+@app.get("/")
+def root():
+    return {"message" : "Hello World!!!"}
+
+@app.post("/test")
+def post_test(audioClip: AudioClip):
+    output = ""
+    print("input audio? = ", type(audioClip.recording.content))
+    audio = np.array(audioClip.recording.content).squeeze()
+
+    print("in test func, audio = ", type(audio), audio.shape)
+
+    feats = get_feature(audio)
+    print("in test section, feats = ", type(feats), feats.shape)
+    output = inference(feats)
+
+
+    return {"output" : output}
+
+
+@app.post("/inference")
+def post_inference(audioClip: AudioClip):
+
+    output = inference(audioClip.recording.content)
+
+    return {"output" : output}
+
+@app.post("/online")
+def post_inference(audioClip: AudioClip):
+
+    output = inference_online(audioClip.recording.content)
+
+    return {"output" : output}
+
+
+@app.post("/inference_file")
+# def inference_file(file: UploadFile = File(...)):
+def post_inference_file():
+    # data = pickle.load(file)
+
+    '''
+    control formatting
+    if data.format != 'wav':
+        do_formatting()
+    '''
+
+    ## run model
+    print('in Inference Start')
+    output = ''
+    output = inference_file()
+    
+    return {"output" : output}
+
+
+
+if __name__ == '__main__':
+    print('this is main')
+    # NOTE: args is only defined if the YAML config block above is uncommented.
+    print(inference(args["wav_path"]))
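Usage note (not part of this commit): the app object above is a regular FastAPI application, so it can be served with uvicorn; the host and port below are assumptions, not values taken from this commit.

# launch (the wav2vec 2.0 checkpoint loads when server.py imports inference.py):
#   uvicorn server:app --host 0.0.0.0 --port 8000
import requests

print(requests.get("http://localhost:8000/").json())   # {'message': 'Hello World!!!'}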