first commit

yumoqing 2024-10-21 11:04:46 +08:00
commit c954b81cea
5 changed files with 145 additions and 0 deletions

README.md Normal file

@@ -0,0 +1,35 @@
# faster-whisper
This is faster than openai-whisper.
## Create a virtual environment
```
mkdir ~/ve
python3 -m venv ~/ve/fw
```
## Installation
```
source ~/ve/fw/bin/activate
pip install nvidia-cublas-cu12 nvidia-cudnn-cu12
export LD_LIBRARY_PATH=`python -c 'import os; import nvidia.cublas.lib; import nvidia.cudnn.lib; print(os.path.dirname(nvidia.cublas.lib.__file__) + ":" + os.path.dirname(nvidia.cudnn.lib.__file__))'`
pip install faster-whisper
```
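To sanity-check the install, a minimal sketch like the following can be run inside the virtual environment; the model name and audio path below are placeholder values, not files in this repo:
```
# Minimal faster-whisper smoke test; "large-v3" and the audio path are example values.
from faster_whisper import WhisperModel

model = WhisperModel("large-v3", device="cuda", compute_type="float16")
segments, info = model.transcribe("/path/to/audio.wav")  # any local audio file
print(info.language)
for s in segments:
    print(f"[{s.start:.2f} -> {s.end:.2f}] {s.text}")
```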
## Run
Create a run.sh script:
```
#!/bin/sh
r=$HOME/ve/fw/bin/python
export LD_LIBRARY_PATH=`$r -c 'import os; import nvidia.cublas.lib; import nvidia.cudnn.lib; print(os.path.dirname(nvidia.cublas.lib.__file__) + ":" + os.path.dirname(nvidia.cudnn.lib.__file__))'`
$r "$@"
```
## Start the server
./run.sh zmqfw.py
## Start the client
./run.sh zmq_client.py
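The two scripts talk over ZMQ: the client sends a JSON object with an audio_file path and the server answers with a JSON transcription (segments, language, content, time_cost). Besides the interactive prompt, the client class can also be used one-shot; a small sketch where the endpoint and audio path are placeholders:
```
# Hypothetical one-shot use of the ASRClient defined in zmq_client.py.
from zmq_client import ASRClient

asr = ASRClient("tcp://127.0.0.1:10002")  # zmq_url from conf/config.json
asr.generate("/path/to/audio.wav")        # prints the server's JSON reply
```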

conf/config.json Normal file

@@ -0,0 +1,6 @@
{
    "zmq_url": "tcp://127.0.0.1:10002",
    "model_dir": "large-v3",
    "device": "cuda",
    "compute_type": "float16"
}
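For reference, zmqfw.py reads these keys through appPublic's getConfig and falls back to defaults for missing ones; a rough sketch of the lookup, run from the project directory where getConfig is assumed to pick up conf/config.json:
```
# Sketch of how the server consumes conf/config.json; mirrors zmqfw.py, not a new API.
import os
from appPublic.jsonConfig import getConfig

config = getConfig(os.getcwd())
model_dir = config.model_dir or "large-v3"
device = config.device or "cpu"
compute_type = config.compute_type or "float16"
zmq_url = config.zmq_url  # e.g. tcp://127.0.0.1:10002
```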

run.sh Executable file

@@ -0,0 +1,5 @@
#!/bin/sh
# Hard-coded cuBLAS/cuDNN library paths for this machine's virtualenv; adjust to your install.
export LD_LIBRARY_PATH=/data/ymq/ve/fw/lib/python3.12/site-packages/nvidia/cublas/lib:/data/ymq/ve/fw/lib/python3.12/site-packages/nvidia/cudnn/lib
# Run the given script with the virtualenv's Python, forwarding all arguments.
r=$HOME/ve/fw/bin/python
$r "$@"

zmq_client.py Normal file

@@ -0,0 +1,42 @@
import json
import os
from appPublic.dictObject import DictObject
from appPublic.zmq_reqrep import ZmqRequester
from appPublic.jsonConfig import getConfig
zmq_url = "tcp://127.0.0.1:9999"
from time import time
class ASRClient:
    def __init__(self, zmq_url):
        self.zmq_url = zmq_url
        self.requester = ZmqRequester(self.zmq_url)

    def generate(self, audio_file):
        # Send the audio file path to the server as JSON and print the reply.
        d = {
            "audio_file": audio_file
        }
        msg = json.dumps(d)
        print(f'send={msg}')
        resp = self.requester.send(msg)
        if resp is not None:
            ret = json.loads(resp)
            print(f'response={ret}')
        else:
            print('response is None')

    def run(self):
        # Interactive loop: read one audio file path per line; an empty line ends the session.
        print(f'running {self.zmq_url}')
        while True:
            print('input audio_file:')
            af = input()
            if len(af) > 0:
                self.generate(af)
            else:
                break
        print('ended ...')

if __name__ == '__main__':
    workdir = os.getcwd()
    config = getConfig(workdir)
    asr = ASRClient(config.zmq_url or zmq_url)
    asr.run()

zmqfw.py Normal file

@@ -0,0 +1,57 @@
import json
import os
from appPublic.dictObject import DictObject
from appPublic.zmq_reqrep import ZmqReplier
from appPublic.jsonConfig import getConfig
from time import time
from faster_whisper import WhisperModel
class ZmqASR:
    def __init__(self):
        # Model and endpoint settings come from conf/config.json (see getConfig below).
        config = getConfig()
        self.model_dir = config.model_dir or "large-v3"
        self.device = config.device or "cpu"
        self.zmq_url = config.zmq_url
        self.compute_type = config.compute_type or "float16"
        self.model = WhisperModel(self.model_dir,
                device=self.device,
                compute_type=self.compute_type)
        # Every incoming ZMQ request is answered by self.generate().
        self.replier = ZmqReplier(self.zmq_url, self.generate)

    def run(self):
        print(f'running {self.zmq_url}')
        self.replier._run()
        print('ended ...')

    def generate(self, d):
        # d is the raw request: JSON bytes carrying an "audio_file" path.
        msg = d.decode('utf-8')
        data = DictObject(**json.loads(msg))
        t1 = time()
        segs, info = self.model.transcribe(data.audio_file)
        t2 = time()
        resp = DictObject(**{
            'segments': [],
            'language': info.language
        })
        resp.content = ''
        for s in segs:
            seg = {
                "start": s.start,
                "end": s.end,
                "text": s.text
            }
            resp.segments.append(seg)
            resp.content += s.text
        resp.time_cost = t2 - t1
        print(f'{resp}')
        return json.dumps(resp)

if __name__ == '__main__':
    workdir = os.getcwd()
    config = getConfig(workdir)
    print(f'{config=}')
    asr = ZmqASR()
    asr.run()
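For orientation, the JSON reply built by generate() decodes to a dict of roughly this shape; the values below are illustrative, not real output:
```
# Illustrative reply shape returned by ZmqASR.generate(); values are made up.
reply = {
    "segments": [{"start": 0.0, "end": 2.4, "text": " example text"}],
    "language": "en",
    "content": " example text",
    "time_cost": 1.23,
}
```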