
UPDATE client

ryu 2 years ago
parent
commit
5a3fd17d5c

+ 64 - 0
client/component/player.py

@@ -0,0 +1,64 @@
+from PyQt5.QtMultimedia import QMediaPlaylist, QMediaContent, QMediaPlayer
+from PyQt5.QtCore import Qt, QUrl
+
+import soundfile as sf
+from collections import deque
+import requests
+import json
+from config.config import Config
+
+SERVER_URL = Config.CLIENT_CONFIG["server_url"]
+INFERENCE_STR = Config.CLIENT_CONFIG["inference_path"]
+
+class Player:
+    def __init__(self, parent):
+        self.parent = parent
+
+        self.player = QMediaPlayer()
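+        # Forward QMediaPlayer signals to the parent widget's update* callbacks.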
+        self.player.currentMediaChanged.connect(self.mediaChanged)
+        self.player.durationChanged.connect(self.durationChanged)
+        self.player.positionChanged.connect(self.positionChanged)
+
+        self.playlist = QMediaPlaylist()
+
+        self.playlist_data = []
+        self.sound_prev_idx = 0
+
+    def play(self, playlists, startRow=0, option=QMediaPlaylist.Sequential ):
+        print(f'in player player.state = {self.player.state()}, {QMediaPlayer.PausedState}')
+
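+        # Resume if the player is merely paused; otherwise rebuild the playlist
+        # and start playback from the selected row.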
+        if self.player.state() == QMediaPlayer.PausedState:
+            self.player.play()
+        else:
+            self.createPlaylist(playlists, startRow, option)
+            self.playlist.setCurrentIndex(startRow)
+            self.player.play()
+    
+    def stop(self):
+        self.player.stop()
+    
+    def createPlaylist(self, playlists, startRow=0, option=QMediaPlaylist.Sequential):
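+        # Rebuild the playlist from local file paths and apply the playback mode.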
+        self.playlist.clear()
+
+        for path in playlists:
+            url = QUrl.fromLocalFile(path)
+            self.playlist.addMedia(QMediaContent(url))
+
+        self.player.setPlaylist(self.playlist)
+        
+        self.playlist.setPlaybackMode(option)
+
+    def updateVolume(self, vol):
+        self.player.setVolume(vol)
+ 
+    def mediaChanged(self, e):
+        self.sound_prev_idx = 0
+        self.parent.updateMediaChanged(self.playlist.currentIndex()) 
+ 
+    def durationChanged(self, msec):
+        if msec>0:
+            self.parent.updateDurationChanged(self.playlist.currentIndex(), msec)
+ 
+    def positionChanged(self, msec):
+        if msec>0:
+            self.parent.updatePositionChanged(self.playlist.currentIndex(), msec)

+ 337 - 0
client/demo.py

@@ -0,0 +1,337 @@
+import sys
+from PyQt5.QtWidgets import (
+    QApplication, QWidget, QTabWidget, QVBoxLayout, 
+    QPushButton, QTextEdit, QHBoxLayout, QFileDialog,
+    QTextBrowser, QGroupBox, QButtonGroup, QSlider,
+    QComboBox,
+)
+from PyQt5 import QtCore, QtWidgets
+from PyQt5.QtGui import QFont, QTextCursor
+from PyQt5.QtMultimedia import QMediaPlaylist
+
+from record import Record
+from component.player import Player
+from summary.infer import summary, summary_with_model
+
+from time import time, sleep
+import soundfile as sf
+
+class Tab1Widget(QWidget):
+    def __init__(self, add_signal):
+        super().__init__()
+        self.text = ''
+        self.add_signal = add_signal
+
+        ## for player
+        self.player = Player(self)
+        self.playlist = ['/Users/user/Documents/HDC/RnD/Project/qt/resource/1. KsponSpeech_sample.wav']
+        self.selectedList = [0]
+        self.playOption = QMediaPlaylist.Sequential
+
+        wav, sr = sf.read(self.playlist[0])
+        self.playlist_data = [[wav, sr]]
+
+        self.sound_prev = 0
+
+        self.initUI()
+
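+        # Two worker threads: RecordWorker drives live recording through Record(),
+        # and FileRecordWorker replays the loaded wav file chunk by chunk;
+        # both report results back to the UI through add_signal.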
+        self.record_thread = RecordWorker(self.add_signal)
+        self.record_file_thread = FileRecordWorker(self.add_signal, self.playlist_data)
+
+        self.btn_run.clicked.connect(self.clicked_run_btn)
+        self.btn_stop.clicked.connect(self.clicked_stop_btn)
+        self.btn_clear.clicked.connect(self.clicked_clear_btn)
+        
+
+    def initUI(self):
+        self.btn_run = QPushButton('Run', self)
+        self.btn_stop = QPushButton('Stop', self)
+        self.btn_clear = QPushButton('Clear', self)
+
+        self.tb = QTextEdit()
+        self.tb.setReadOnly(True)
+        self.tb.setAcceptRichText(True)
+        font = QFont()
+        font.setPointSize(80)
+        self.tb.setFont(font)
+        self.tb.setText(self.text)
+
+        # Play Control
+        self.gb = QGroupBox('Play Control')
+
+        grp = QButtonGroup(self)
+        play_button_layout = QHBoxLayout()
+        play_button_text = ['Change', '▶','■'] # ['◀◀', '▶', '⏸', '▶▶', '■']
+        for i in range(len(play_button_text)):
+            btn = QPushButton(play_button_text[i], self)
+            grp.addButton(btn, i)
+            play_button_layout.addWidget(btn)
+        grp.buttonClicked[int].connect(self.clicked_play_btn)
+
+        # Volume
+        self.slider = QSlider(QtCore.Qt.Horizontal, self)
+        self.slider.setRange(0,100)
+        self.slider.setValue(50)
+        self.slider.valueChanged[int].connect(self.volumeChanged)
+        play_button_layout.addWidget(self.slider)
+        self.gb.setLayout(play_button_layout)
+
+        self.vbox = QVBoxLayout()
+        self.vbox.addWidget(self.gb)
+        self.vbox.addWidget(self.btn_run)
+        self.vbox.addWidget(self.btn_stop)
+        self.vbox.addWidget(self.btn_clear)
+        self.vbox.addWidget(self.tb)
+        
+
+        self.setLayout(self.vbox)
+
+    def clicked_run_btn(self):
+        self.record_thread.start()
+        # self.text += '\nrun!!\n'
+        # self.tb.setText(self.text)
+
+        # self.addText.emit(self.text)
+
+    def clicked_play_btn(self, id):
+        if id==0: #change
+            self.changeList()
+        elif id==1: #▶
+            self.player.play(self.playlist, self.selectedList[0], self.playOption)
+            self.record_file_thread.start()
+        elif id==2: #■
+            self.player.stop()
+            self.record_file_thread.stop()
+
+    def clicked_stop_btn(self):
+        self.record_thread.stop()
+    
+    def clicked_clear_btn(self):
+        # self.record_thread = RecordWorker(self.add_signal)
+        self.record_thread.clear()
+        self.add_signal.emit('<cls>')
+
+    def volumeChanged(self):
+        self.player.updateVolume(self.slider.value())
+
+    def clear_text_browser(self):
+        self.text = ''
+        # self.tb.setText(self.text)
+        self.add_signal.emit('<cls>')
+
+    def changeList(self):
+        fname = QFileDialog.getOpenFileName(self, 'Select one file to open', './resource')
+        if fname[0]:
+            self.clear_text_browser()
+            wav, sr = sf.read(fname[0])
+            self.playlist = [fname[0]]
+            self.playlist_data = [[wav, sr]]
+            self.record_file_thread = FileRecordWorker(self.add_signal, self.playlist_data)
+
+    def updateMediaChanged(self, index):
+        print('index:',index)
+        pass
+        # if index>=0:
+        #     self.table.selectRow(index)
+
+    def updateDurationChanged(self, index, msec):        
+        print('index:',index, 'duration:', msec)
+        # self.pbar = self.table.cellWidget(index, 1)
+        # if self.pbar:
+        #     self.pbar.setRange(0, msec)       
+ 
+    def updatePositionChanged(self, index, msec):
+        print('index:',index, 'position:', msec)
+
+        # self.pbar = self.table.cellWidget(index, 1)
+        # if self.pbar:
+        #     self.pbar.setValue(msec)
+
+class Tab2Widget(QWidget):
+    def __init__(self):
+        super().__init__()
+
+        self.summary_model_idx = 0
+
+        self.initUI()
+        self.btn_convert.clicked.connect(self.clicked_convert_btn)
+        self.btn_open_file.clicked.connect(self.clicked_open_file_btn)
+        self.btn_clear.clicked.connect(self.clicked_clear_btn)
+
+    def initUI(self):
+
+        self.grid = QHBoxLayout()
+        self.btn_open_file = QPushButton('Open File')
+        self.btn_clear = QPushButton('Clear')
+        self.grid.addWidget(self.btn_open_file)
+        self.grid.addWidget(self.btn_clear)
+
+        self.combo = QComboBox(self)
+        self.combo.addItems(["General", "Estate"])
+        self.combo.currentTextChanged.connect(self.on_combobox_func) 
+        self.grid.addWidget(self.combo)
+
+
+        self.tb_source = QTextEdit()
+        self.tb_source.setAcceptRichText(True)
+
+        self.btn_convert = QPushButton('Convert', self)
+
+        self.tb_convert = QTextBrowser()
+        # self.tb_convert.setReadOnly(True)
+        self.tb_convert.setAcceptRichText(True)
+        font = QFont()
+        font.setPointSize(80)
+        self.tb_convert.setFont(font)
+        # self.tb_convert.setSource(QtCore.QUrl.fromLocalFile("test2.html"))
+
+        self.vbox = QVBoxLayout()
+        self.vbox.addLayout(self.grid, 0)
+        self.vbox.addWidget(self.tb_source, 1)
+        self.vbox.addWidget(self.btn_convert, 2)
+        self.vbox.addWidget(self.tb_convert, 3)
+
+        self.setLayout(self.vbox)
+
+    def on_combobox_func(self, text):
+        if text == 'General':
+            self.summary_model_idx = 0
+        else:
+            self.summary_model_idx = 1
+
+    @QtCore.pyqtSlot(str)
+    def update_source_text(self, content):
+        self.tb_source.setText(content)
+
+    def clicked_convert_btn(self):
+        text = self.tb_source.toPlainText()
+        # text = summary(text)
+        text = summary_with_model(text, self.summary_model_idx)
+        self.tb_convert.setText(text)
+
+    def clicked_open_file_btn(self):
+        fname = QFileDialog.getOpenFileName(self, 'Open file', './script')
+        if fname[0]:
+            with open(fname[0], 'r') as f:
+                data = f.read()
+                self.tb_source.setText(data)
+
+    def clicked_clear_btn(self):
+        self.tb_source.setText("")
+
+
+class MyApp(QtWidgets.QMainWindow):
+    addText = QtCore.pyqtSignal(str)
+
+    def __init__(self, parent=None):
+        super(MyApp, self).__init__(parent)
+        self.text = ''
+        self.blank_cnt = 0
+        self.blank_max = 2
+
+        self.initUI()
+
+    def initUI(self):
+        self.tab1 = Tab1Widget(self.addText)
+        self.tab2 = Tab2Widget()
+
+        # tab1.addText.connect(tab2.update_source_text)
+        # tab1.addText.connect(tab2.update_source_text)
+        # self.addText.connect(tab2.update_source_text)
+        self.addText.connect(self.update_text)
+
+        tabs = QTabWidget()
+        tabs.addTab(self.tab1, '음성 인식')
+        tabs.addTab(self.tab2, '요약')
+
+        self.setCentralWidget(tabs)
+
+        # vbox = QVBoxLayout()
+        # vbox.addWidget(tabs)
+
+        # self.setLayout(vbox)
+
+        self.setWindowTitle('BTS')
+        self.setGeometry(300, 300, 900, 500)
+        self.show()
+
+    @QtCore.pyqtSlot(str)
+    def update_text(self, content):
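+        # '<cls>' is a sentinel that clears the transcript. Empty updates are counted,
+        # and a newline is appended once blank_max consecutive blanks are seen.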
+        if content == '<cls>':
+            self.text = '' 
+        elif len(content) == 0 and len(self.text) and self.text[-1] != "\n":
+            self.blank_cnt += 1
+            if self.blank_cnt >= self.blank_max:
+                self.blank_cnt = 0
+                self.text += '\n'
+        else:
+            self.text += content
+
+        self.tab1.tb.setText(self.text)
+        self.tab1.tb.moveCursor(QTextCursor.End)
+
+        self.tab2.tb_source.setText(self.text)
+        self.tab2.tb_source.moveCursor(QTextCursor.End)
+
+def list_chunk(lst, n):
+    return [lst[i:i+n] for i in range(0, len(lst), n)]
+
+class RecordWorker(QtCore.QThread):
+
+    def __init__(self, add_signal):
+        super().__init__()
+        self.power = False
+        self.record = Record()
+        self.add_signal = add_signal
+    
+    def run(self):
+        self.power = True
+
+        
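+        # Loop until stop() clears self.power; record_unit receives add_signal.emit
+        # as its callback so results can be pushed back to the UI.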
+        while self.power:
+            self.record.record_unit(self.add_signal.emit)
+
+        # self.record.record_unit(self.add_signal.emit)
+
+    def stop(self):
+        self.power = False
+
+    def clear(self):
+        self.record.clear()
+
+class FileRecordWorker(QtCore.QThread):
+    def __init__(self, add_signal, audio_data):
+        super().__init__()
+        self.power = False
+        self.record = Record()
+        self.add_signal = add_signal
+        self.audio_data = audio_data
+
+    def run(self):
+        self.power = True
+        sec = 1
+
+        audio, sr = self.audio_data[0]
+        sample_list = list_chunk(audio, 16000 * sec) 
+
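+        # Pace the loop roughly in real time: sleep until the wall clock reaches the
+        # chunk's target timestamp before passing each 1-second chunk to record_file.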
+        start_time = time()
+        for idx, sample in enumerate(sample_list):
+            if self.power:
+                now = time()
+                length = len(sample)/sr
+
+                print('time = ', start_time + length + (idx+1), 'now = ', now, 'diff = ', (start_time + length + (idx+1)) - now)
+                pivot = (start_time + length + (idx+1)) - now
+                if pivot > 0:
+                    sleep(pivot)
+                self.record.record_file(sample, self.add_signal.emit)
+
+    def stop(self):
+        self.power = False
+
+if __name__ == '__main__':
+    app = QApplication(sys.argv)
+    ex = MyApp()
+    sys.exit(app.exec_())

+ 68 - 0
client/summary/infer.py

@@ -0,0 +1,68 @@
+import torch
+from transformers import PreTrainedTokenizerFast
+from transformers.models.bart import BartForConditionalGeneration
+import os
+
+root_path = os.getcwd()
+
+summary_general = 'summary/kobart_summary'
+summary_estate = 'summary/kobart_summary_gen'
+
+model_estate = BartForConditionalGeneration.from_pretrained(os.path.join(root_path, summary_estate))
+model_gen = BartForConditionalGeneration.from_pretrained(os.path.join(root_path, summary_general))
+#tokenizer = PreTrainedTokenizerFast.from_pretrained('gogamza/kobart-base-v1')
+# tokenizer = PreTrainedTokenizerFast(tokenizer_file = 'tokenizer.json')
+tokenizer = PreTrainedTokenizerFast(tokenizer_file = os.path.join(root_path, 'summary/tokenizer.json'))
+
+# text = input()
+
+import pandas as pd
+
+def infer():
+    test = pd.read_csv("test_label.tsv", sep = '\t', encoding = "utf-8")
+    for text in test["text"]:
+        if text:
+            input_ids = tokenizer.encode(text)
+            input_ids = torch.tensor(input_ids)
+            input_ids = input_ids.unsqueeze(0)
+            output = model_gen.generate(input_ids, eos_token_id=1, max_length=512, num_beams=7)
+            output = tokenizer.decode(output[0], skip_special_tokens=True)
+
+            print(">>>"+"원문 ----   "+text)
+            print("\n"+ "######################################################################" + "\n")
+            print(">>>"+"요약 ----   "+output)
+            print("\n"+ "######################################################################" + "\n")
+
+def summary(text):
+    input_ids = tokenizer.encode(text)
+    input_ids = torch.tensor(input_ids)
+    input_ids = input_ids.unsqueeze(0)
+    output = model_estate.generate(input_ids, eos_token_id=1, max_length=512, num_beams=7)
+    output = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    return output
+
+def summary_with_model(text, idx):
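+    # idx == 0 selects model_estate, any other value selects model_gen;
+    # both generate with beam search (num_beams=7) up to 512 tokens.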
+    input_ids = tokenizer.encode(text)
+    input_ids = torch.tensor(input_ids)
+    input_ids = input_ids.unsqueeze(0)
+    if idx == 0:
+        output = model_estate.generate(input_ids, eos_token_id=1, max_length=512, num_beams=7)
+    else:
+        output = model_gen.generate(input_ids, eos_token_id=1, max_length=512, num_beams=7)
+    output = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    return output
+
+
+if __name__ == '__main__':
+    print(summary('사장님 분위기 좀 알려주세요. 요즘에 매매 전세 전반적인 분위기가 어때요'\
+'그래도 이제 수능 끝나고 좀 나아져야 되는데 매물은 없고 지금 현재 손님도 안 찾고 그래요'\
+'물건을 좀 찾기는 하는데 물건이 안 나온단 얘기죠'\
+'전세도 그런가요'\
+'전세도그래요 '\
+'전세나 매물이 많이 나오지 않으세요.'\
+'예 안 나옵니다. '\
+'찾는 사람은 간혹 있어도 전세가가 약간 올랐어요.'\
+'알겠습니다.' \
+'사장님 시세는 그대로 하겠습니다. 감사합니다.'))

+ 55 - 0
client/summary/kobart_summary/config.json

@@ -0,0 +1,55 @@
+{
+  "_name_or_path": "gogamza/kobart-base-v2",
+  "activation_dropout": 0.0,
+  "activation_function": "gelu",
+  "add_bias_logits": false,
+  "add_final_layer_norm": false,
+  "architectures": [
+    "BartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.0,
+  "author": "Hyeokout",
+  "bos_token_id": 1,
+  "classif_dropout": 0.1,
+  "classifier_dropout": 0.1,
+  "d_model": 768,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 1,
+  "do_blenderbot_90_layernorm": false,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 1,
+  "extra_pos_embeddings": 2,
+  "force_bos_token_to_be_generated": false,
+  "forced_eos_token_id": 1,
+  "gradient_checkpointing": false,
+  "id2label": {
+    "0": "NEGATIVE",
+    "1": "POSITIVE"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "kobart_version": 2.0,
+  "label2id": {
+    "NEGATIVE": 0,
+    "POSITIVE": 1
+  },
+  "max_position_embeddings": 1026,
+  "model_type": "bart",
+  "normalize_before": false,
+  "normalize_embedding": true,
+  "num_hidden_layers": 6,
+  "pad_token_id": 3,
+  "scale_embedding": false,
+  "static_position_embeddings": false,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "transformers_version": "4.8.2",
+  "use_cache": true,
+  "vocab_size": 30000
+}

+ 55 - 0
client/summary/kobart_summary_gen/config.json

@@ -0,0 +1,55 @@
+{
+  "_name_or_path": "gogamza/kobart-base-v2",
+  "activation_dropout": 0.0,
+  "activation_function": "gelu",
+  "add_bias_logits": false,
+  "add_final_layer_norm": false,
+  "architectures": [
+    "BartForConditionalGeneration"
+  ],
+  "attention_dropout": 0.0,
+  "author": "hyeokout",
+  "bos_token_id": 1,
+  "classif_dropout": 0.1,
+  "classifier_dropout": 0.1,
+  "d_model": 768,
+  "decoder_attention_heads": 16,
+  "decoder_ffn_dim": 3072,
+  "decoder_layerdrop": 0.0,
+  "decoder_layers": 6,
+  "decoder_start_token_id": 1,
+  "do_blenderbot_90_layernorm": false,
+  "dropout": 0.1,
+  "encoder_attention_heads": 16,
+  "encoder_ffn_dim": 3072,
+  "encoder_layerdrop": 0.0,
+  "encoder_layers": 6,
+  "eos_token_id": 1,
+  "extra_pos_embeddings": 2,
+  "force_bos_token_to_be_generated": false,
+  "forced_eos_token_id": 1,
+  "gradient_checkpointing": false,
+  "id2label": {
+    "0": "NEGATIVE",
+    "1": "POSITIVE"
+  },
+  "init_std": 0.02,
+  "is_encoder_decoder": true,
+  "kobart_version": 2.0,
+  "label2id": {
+    "NEGATIVE": 0,
+    "POSITIVE": 1
+  },
+  "max_position_embeddings": 1026,
+  "model_type": "bart",
+  "normalize_before": false,
+  "normalize_embedding": true,
+  "num_hidden_layers": 6,
+  "pad_token_id": 3,
+  "scale_embedding": false,
+  "static_position_embeddings": false,
+  "tokenizer_class": "PreTrainedTokenizerFast",
+  "transformers_version": "4.8.2",
+  "use_cache": true,
+  "vocab_size": 30000
+}

File diff suppressed because it is too large
+ 19 - 0
client/summary/test_label.tsv


File diff suppressed because it is too large
+ 0 - 0
client/summary/tokenizer.json


+ 9 - 6
decoder/w2l_parlance.py

@@ -6,13 +6,12 @@ from examples.speech_recognition.w2l_decoder import W2lDecoder
 class W2lParlance(object):
     def __init__(self, args, tgt_dict):
         self.tgt_dict = [k for k in tgt_dict.indices.keys()]
-        lm_path = "/root/kakao3.arpa"
-        alpha = 5
-        print('lm path, alpha = ', lm_path, alpha)
+
+        print('lm path, alpha = ', getattr(args, "kenlm_model", None), float(getattr(args, "lm_weight", 0.5 )))
         self.decoder = CTCBeamDecoder (
             self.tgt_dict,
-            model_path = lm_path,
-            alpha = alpha,
+            model_path = getattr(args, "kenlm_model", None),
+            alpha = float(getattr(args, "lm_weight", 0.5 )),
             beta = 10,
             # beta = float(getattr(args, "beta", 0 )),
             cutoff_top_n = int(getattr(args, "cutoff_top_n", 40 )),
@@ -23,6 +22,9 @@ class W2lParlance(object):
             log_probs_input=False
         )
 
+        self.online = True
+        # self.online = False
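+        # In online (streaming) mode, decode() only looks at a fixed 50-frame window
+        # of recent emissions (indices -75:-25) rather than the full utterance.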
+
     def generate(self, models, sample, **unused):
         """Generate a batch of inferences."""
         # model.forward normally channels prev_output_tokens into the decoder
@@ -49,7 +51,8 @@ class W2lParlance(object):
         return "".join([vocab[x] for x in tokens[0:seq_len]])
     
     def decode(self, emissions_softmax):
-        emissions_softmax = emissions_softmax[:,-75:-25,:]
+        if self.online:
+            emissions_softmax = emissions_softmax[:,-75:-25,:]
 
         beam_results, beam_scores, timesteps, out_lens = self.decoder.decode(emissions_softmax)
         

+ 84 - 11
inference.py

@@ -14,6 +14,8 @@ from fairseq.logging.meters import StopwatchMeter, TimeMeter
 import soundfile as sf
 import torch.nn.functional as F
 
+import glob
+
 from ctcdecode import CTCBeamDecoder
 
 logging.basicConfig()
@@ -537,17 +539,47 @@ def build_generator(args):
             "only flashlight decoders with (viterbi, kenlm, fairseqlm) options are supported at the moment"
         )
 
-if __name__ == "__main__":
-    cli_main()
-    exit()
+# if __name__ == "__main__":
+#     cli_main()
+#     exit()
 
 ## for parlance online decoder test
+# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest', 
+#     '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt', 
+#     '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance', 
+#     '--lm-model', '/root/project/speech_server/decoder/kakao3.bin', '--lm-weight', '2', '--word-score', '-1', 
+#     '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr', 
+#     '--max-tokens', '4000000', '--post-process', 'letter', '--cpu']
+
+# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest', 
+#     '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt', 
+#     '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance', 
+#     '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '5', '--word-score', '-1', 
+#     '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr', 
+#     '--max-tokens', '4000000', '--post-process', 'letter']
+
+# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest', 
+# '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/estate_cer20.pt', 
+# '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'viterbi', 
+# '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '5', '--word-score', '-1', 
+# '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr', 
+# '--max-tokens', '4000000', '--post-process', 'letter']
+
+
+# sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest', 
+# '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/kaist_tmp.pt', 
+# '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance', 
+# '--lm-model', '/root/nas/models/lm/n_gram_correct.bin', '--lm-weight', '0', '--word-score', '-1', 
+# '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr', 
+# '--max-tokens', '4000000', '--post-process', 'letter']
+
+## no lm
 sys.argv = ['/root/project/speech_server/inference.py', '/root/fairseq/examples/wav2vec/manifest', 
-    '--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/kaist_best.pt',
-    '--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance', 
-    '--lm-model', '/root/project/speech_server/decoder/kakao3.bin', '--lm-weight', '2', '--word-score', '-1', 
-    '--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr', 
-    '--max-tokens', '4000000', '--post-process', 'letter', '--cpu']
+'--task', 'audio_finetuning', '--nbest', '1', '--path', '/root/nas/models/finetune/estate_cer20.pt', 
+'--gen-subset', 'test', '--results-path', '/root/outputs', '--w2l-decoder', 'parlance', 
+'--word-score', '-1', 
+'--sil-weight', '0', '--criterion', 'ctc', '--labels', 'ltr', 
+'--max-tokens', '4000000', '--post-process', 'letter']
 
 parser = make_parser()
 args = options.parse_args_and_arch(parser)
@@ -628,7 +660,6 @@ def inference_online(audio):
 
 
 def inference(audio):
-    logger.info("| in inference func !!")
     sample = dict()
     net_input = dict()
 
@@ -645,16 +676,58 @@ def inference(audio):
 
     hypos = task.inference_step(generator, models, sample, None)
 
+    if args.w2l_decoder == 'viterbi':
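+        # The viterbi decoder returns token ids, so decode the best hypothesis back
+        # into text with the target dictionary and post_process before returning it.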
+        for hypo in hypos[: min(len(hypos), args.nbest)]:
+            hyp_pieces = tgt_dict.string(hypo[0]["tokens"].int().cpu())
+
+            if "words" in hypo:
+                hyp_words = " ".join(hypo["words"])
+            else:
+                hyp_words = post_process(hyp_pieces, args.post_process)
+
+            hypos = hyp_words
+
     logger.info(f"| | hypos => {hypos}")
     
     return hypos
 
-def inference_file():
+def inference_file(file_list):
     sample = dict()
     net_input = dict()
 
-    feature = get_feature_to_path('/root/nas/data/kspon_with_aug/ogg/pcm_wav/test/eval_clean/KsponSpeech_E00001.ogg')
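+    # Run inference on each file in turn and log the hypothesis for every file.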
+    for _file in file_list:
+        feature = get_feature_to_path(_file)
+        net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
+        padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
+
+        net_input["padding_mask"] = padding_mask
+        sample["net_input"] = net_input
+
+        models[0].to(use_cuda_str)
+
+        logger.info(f'feature = {sample["net_input"]["source"].shape}')
+        hypos = task.inference_step(generator, models, sample, None)
+
+        if args.w2l_decoder == 'viterbi':
+            for hypo in hypos[: min(len(hypos), args.nbest)]:
+                hyp_pieces = tgt_dict.string(hypo[0]["tokens"].int().cpu())
+
+                if "words" in hypo:
+                    hyp_words = " ".join(hypo["words"])
+                else:
+                    hyp_words = post_process(hyp_pieces, args.post_process)
+
+                hypos = hyp_words
+
+        
+        logger.info(f"| | file => {_file}")
+        logger.info(f"| | hypos => {hypos}")
+
+    return ''
+
+    feature = get_feature_to_path('/root/nas/data/estate_114/ogg/wav/202001/02/93687_5009_5009_99024777300_20200102103742.ogg')
     
+
     net_input["source"] = feature.unsqueeze(0).to(use_cuda_str)
     padding_mask = torch.BoolTensor(net_input["source"].size(1)).fill_(False).unsqueeze(0).to(use_cuda_str)
 

Some files were not shown because too many files changed in this diff