yumoqing 2024-11-13 13:21:08 +08:00
parent 0c967da4c5
commit 4b9146816d
10 changed files with 160 additions and 0 deletions

README.md

@@ -1,2 +1,35 @@
# moonshineASR
## dependencies
[requirements.txt](requirements.txt) lists all the dependency modules.
Use the following command to install them (the `mspip` wrapper is created in the "make a virtual python environment" step below):
```
mspip install -r requirements.txt
```
## install moonshine service
```
cd script
sudo ./install.sh
```
## make a virtual python environment
```
python3 -m venv ~/ve/ms
cat > ~/bin/mspip <<'EOF'
#!/usr/bin/bash
~/ve/ms/bin/pip "$@"
EOF
cat > ~/bin/mspy <<'EOF'
#!/usr/bin/bash
~/ve/ms/bin/python "$@"
EOF
chmod +x ~/bin/ms*
```
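## call the service
Once the service is installed and running, clients talk to it over HTTP. The snippet below is only a sketch of such a call: the port 10090 and the `/api` path come from `conf/config.json` and `wwwroot/api/index.dspy` in this commit, and the base64 `audiofile` field mirrors `app/test.py`; the exact payload format the handler accepts is not documented here.
```
import base64
import requests

# hypothetical client call; adjust host/port to your deployment
with open('sample.wav', 'rb') as f:
    audio_b64 = base64.b64encode(f.read()).decode('utf-8')
resp = requests.post('http://127.0.0.1:10090/api', data={'audiofile': audio_b64})
print(resp.text)
```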

app/README.md Normal file (+0 lines)

app/moonshine.py Normal file (+38 lines)

@@ -0,0 +1,38 @@
import os
os.environ['KERAS_BACKEND'] = 'torch'
from transformers import AutoModelForSpeechSeq2Seq, AutoConfig, PreTrainedTokenizerFast
import torchaudio
import sys
import time
import torch
from appPublic.worker import awaitify
from appPublic.jsonConfig import getConfig
from ahserver.serverenv import ServerEnv
from ahserver.webapp import webapp

class Moonshine:
    def __init__(self, modelname):
        # default model is 'usefulsensors/moonshine-tiny'
        if modelname is None:
            modelname = 'usefulsensors/moonshine-tiny'
        self.model = AutoModelForSpeechSeq2Seq.from_pretrained(modelname,
                trust_remote_code=True)
        self.tokenizer = PreTrainedTokenizerFast.from_pretrained(modelname)

    def inference(self, audiofile):
        # load the audio and resample to the 16 kHz rate the model expects
        audio, sr = torchaudio.load(audiofile)
        if sr != 16000:
            audio = torchaudio.functional.resample(audio, sr, 16000)
        tokens = self.model(audio)
        return self.tokenizer.decode(tokens[0], skip_special_tokens=True)

def main():
    config = getConfig()
    modelname = config.modelname
    m = Moonshine(modelname)
    g = ServerEnv()
    # expose the blocking inference call as an awaitable for the web handlers
    g.inference = awaitify(m.inference)

if __name__ == '__main__':
    webapp(main)
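For a quick check of the wrapper class above without starting the web server, it can be driven directly. A minimal sketch, assuming a local `sample.wav` (the path is a placeholder); note that the pip package `useful-moonshine` also installs a top-level `moonshine` module, so importing this file by that name may clash:
```
# sketch: exercise app/moonshine.py directly (run from the app directory)
from moonshine import Moonshine   # this file; may collide with the installed 'moonshine' package

m = Moonshine(None)               # None falls back to 'usefulsensors/moonshine-tiny'
print(m.inference('sample.wav'))  # 'sample.wav' is a placeholder audio file
```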

app/test.py Normal file (+32 lines)

@@ -0,0 +1,32 @@
import time
import requests
import base64

def file2b64(file_path):
    # read the file content
    with open(file_path, 'rb') as file:
        file_content = file.read()
    # base64-encode the bytes
    base64_encoded_data = base64.b64encode(file_content)
    # convert the encoded bytes to a string
    base64_encoded_str = base64_encoded_data.decode('utf-8')
    return base64_encoded_str

while True:
    print('input audio file path:')
    i = input()
    if i == '':
        continue
    t1 = time.time()
    ret = requests.post('http://pd4e.com:10090/api',
        data={
            'audiofile': file2b64(i)
        })
    t2 = time.time()
    print(ret.text, t2 - t1, 'seconds')

conf/README.md Normal file (+0 lines)

conf/config.json Executable file (+48 lines)

@@ -0,0 +1,48 @@
{
    "password_key":"!@#$%^&*(*&^%$QWERTYUIqwertyui234567",
    "modelname":"usefulsensors/moonshine-tiny",
    "logger":{
        "name":"qwenvl",
        "levelname":"info",
        "logfile":"$[workdir]$/logs/sage.log"
    },
    "filesroot":"$[workdir]$/files",
    "website":{
        "paths":[
            ["$[workdir]$/wwwroot",""]
        ],
        "client_max_size":10000,
        "host":"0.0.0.0",
        "port":10090,
        "coding":"utf-8",
        "indexes":[
            "index.html",
            "index.tmpl",
            "index.ui",
            "index.dspy",
            "index.md"
        ],
        "startswiths":[
            {
                "leading":"/idfile",
                "registerfunction":"idFileDownload"
            }
        ],
        "processors":[
            [".dspy","dspy"],
            [".md","md"]
        ],
        "session_max_time":3000,
        "session_issue_time":2500,
        "session_redis_notuse":{
            "url":"redis://127.0.0.1:6379"
        }
    },
    "langMapping":{
        "zh-Hans-CN":"zh-cn",
        "zh-CN":"zh-cn",
        "en-us":"en",
        "en-US":"en"
    }
}
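The service reads this file through `appPublic.jsonConfig.getConfig()`, which among other things resolves the `$[workdir]$` placeholders. The sketch below uses the plain `json` module and a manual substitution purely as an illustration of which keys the app consumes; it is not the appPublic implementation.
```
import json, os

# illustration only: the real loader is appPublic.jsonConfig.getConfig()
workdir = os.getcwd()   # assumed substitution for $[workdir]$
with open('conf/config.json') as f:
    cfg = json.loads(f.read().replace('$[workdir]$', workdir))

print(cfg['modelname'])         # model name handed to Moonshine in app/moonshine.py
print(cfg['website']['port'])   # HTTP port used by clients such as app/test.py
```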

requirements.txt Normal file (+4 lines)

@@ -0,0 +1,4 @@
useful-moonshine@git+https://github.com/usefulsensors/moonshine.git
git+https://git.kaiyuancloud.cn/yumoqing/apppublic
git+https://git.kaiyuancloud.cn/yumoqing/ahserver

script/install.sh Executable file (+3 lines)

@@ -0,0 +1,3 @@
sudo cp moonshine.service /etc/systemd/system
sudo systemctl enable moonshine.service
sudo systemctl start moonshine

wwwroot/README.md Normal file (+0 lines)

wwwroot/api/index.dspy Normal file (+2 lines)

@@ -0,0 +1,2 @@
return await inference(params_kw.audiofile)