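"""
Speech-to-text service built on faster-whisper and served with ahserver.

init_func() loads the Whisper model once at startup and registers an awaitable
`inference` entry on ServerEnv; webapp() then runs the async web service.
"""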
import json
import os
from appPublic.worker import awaitify
from appPublic.jsonConfig import getConfig
from appPublic.dictObject import DictObject  # used to build the result object in inference()
from ahserver.serverenv import ServerEnv
from ahserver.webapp import webapp
from time import time
from faster_whisper import WhisperModel


class FWASR:
	def __init__(self):
		config = getConfig()
		self.modelname = config.modelname or "large-v3"
		self.device = config.device or "cpu"
		self.compute_type = config.compute_type or "float16"
		self.model = WhisperModel(self.modelname,
				device=self.device,
				compute_type=self.compute_type)
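
	# Note: the defaults above pair device "cpu" with compute_type "float16".
	# Per the faster-whisper docs, "float16" targets GPU inference, while "int8"
	# or "float32" is the usual choice on CPU, so set these config keys together.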

	def inference(self, audiofile):
		t1 = time()
		segs, info = self.model.transcribe(audiofile)
		d = DictObject(**{
			'segments': [],
			'language': info.language
		})
		d.content = ''
		for s in segs:
			seg = {
				"start": s.start,
				"end": s.end,
				"text": s.text
			}
			d.segments.append(seg)
			d.content += s.text
		# transcribe() returns a lazy generator, so the real decoding work happens
		# in the loop above; take the end timestamp only after it has finished
		t2 = time()
		d.time_cost = t2 - t1
		return d


def init_func():
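	# webapp() is expected to have loaded the json config before calling this,
	# since FWASR.__init__ reads modelname/device/compute_type via getConfig().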
	asr = FWASR()
	g = ServerEnv()
	# wrap the blocking inference call so async server code can await it
	g.inference = awaitify(asr.inference)


if __name__ == '__main__':
	webapp(init_func)