import json
import os

from appPublic.worker import awaitify
from appPublic.jsonConfig import getConfig
from appPublic.dictObject import DictObject
from ahserver.serverenv import ServerEnv
from ahserver.webapp import webapp

from time import time
from faster_whisper import WhisperModel

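# FWASR reads its model settings from the JSON config loaded by getConfig().
# A minimal sketch of the relevant keys (file layout and values are assumptions;
# only modelname, device and compute_type are read below):
#
#     {
#         "modelname": "large-v3",
#         "device": "cuda",
#         "compute_type": "float16"
#     }
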
class FWASR:
    def __init__(self):
        # Read model settings from the config, falling back to defaults.
        config = getConfig()
        self.modelname = config.modelname or "large-v3"
        self.device = config.device or "cpu"
        self.compute_type = config.compute_type or "float16"
        self.model = WhisperModel(self.modelname,
                                  device=self.device,
                                  compute_type=self.compute_type)

    def inference(self, audiofile):
        # Transcribe one audio file and record how long the call takes.
        t1 = time()
        segs, info = self.model.transcribe(audiofile)
        t2 = time()
        d = DictObject(**{
            'segments': [],
            'language': info.language
        })
        d.content = ''
        # Collect per-segment timestamps and text, and build the full transcript.
        for s in segs:
            seg = {
                "start": s.start,
                "end": s.end,
                "text": s.text
            }
            d.segments.append(seg)
            d.content += s.text
        d.time_cost = t2 - t1
        return d

def init_func():
    # Create the ASR instance once and expose an awaitable inference
    # entry point in the server environment.
    asr = FWASR()
    g = ServerEnv()
    g.inference = awaitify(asr.inference)

if __name__ == '__main__':
    webapp(init_func)
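
# Minimal sketch of a direct, non-service check (illustrative only): it assumes
# the JSON config has already been loaded, as webapp() does before init_func(),
# and that 'sample.wav' exists locally.
#
#     asr = FWASR()
#     result = asr.inference('sample.wav')
#     print(result.language, result.time_cost)
#     print(result.content)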