yumoqing 2025-05-25 09:57:55 +00:00
commit e25e9dc817
12 changed files with 80 additions and 22 deletions

View File

@@ -9,6 +9,8 @@ import numpy as np
import soundfile as sf
# import tomli
from cached_path import cached_path
+from appPublic.textsplit import split_text_with_dialog_preserved
+from ahserver.serverenv import get_serverenv
import pycld2 as cld
import cn2an
@@ -51,6 +53,20 @@ ode_method = "euler"
sway_sampling_coef = -1.0
speed = 1.0

+def audio_ndarray_to_base64(waveform: np.ndarray, sample_rate: int = 16000) -> str:
+    # If the waveform is mono, make sure its shape is (samples, 1)
+    if waveform.ndim == 1:
+        waveform = waveform.reshape(-1, 1)
+    # Write the audio into an in-memory buffer in WAV format
+    buffer = io.BytesIO()
+    sf.write(buffer, waveform, samplerate=sample_rate, format='WAV')
+    buffer.seek(0)
+    # Encode the WAV bytes as base64
+    b64_audio = base64.b64encode(buffer.read()).decode('utf-8')
+    return b64_audio
+
def write_wav_buffer(wav, nchannels, framerate):
    fs = FileStorage()
    fn = fs._name2path(f'{getID()}.wav', userid='tmp')
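As a sanity check on the new helper, here is a minimal round-trip sketch (the decoder name below is ours, not part of the commit; it only assumes `soundfile` can read WAV bytes from a file-like object, which it can):

```python
import base64
import io

import soundfile as sf

def base64_to_audio_ndarray(b64_audio: str):
    # Decode the base64 string back into WAV bytes, then read them
    # with soundfile; returns (waveform, sample_rate).
    raw = base64.b64decode(b64_audio)
    return sf.read(io.BytesIO(raw))
```

Feeding the output of `audio_ndarray_to_base64` through this decoder should return the original samples and sample rate.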
@@ -133,28 +149,22 @@ class F5TTS:
        return t

    async def split_text(self, text_gen, speaker):
-        reg1 = r"(?=\[\w+\])"
-        lang = await awaitify(detect_language)(text_gen)
-        if self.config.language.get(lang):
-            reg1 = r"{}".format(self.config.language.get(lang).sentence_splitter)
-        if lang == 'zh':
-            text_gen = await awaitify(cn2an.transform)(text_gen, 'an2cn')
-        chunks = re.split(reg1, text_gen)
+        chunks = split_text_with_dialog_preserved(text_gen)
        # reg2 = self.config.speaker_match
        reg2 = r"\[\[(\w+)\]\]"
        ret = []
        for text in chunks:
            if text in ['\r', '']:
                continue
+            lang = await awaitify(detect_language)(text)
+            if lang == 'zh':
+                text = await awaitify(cn2an.transform)(text, 'an2cn')
            voice = speaker
            match = re.match(reg2, text)
            if match:
                debug(f'{text=}, match {reg2=}')
                voice = match[1]
            if voice not in self.voices:
-                voice = "main"
-            debug(f'{text} inferences with speaker({voice})..{reg2=}')
+                voice = speaker
            text = re.sub(reg2, "", text)
            gen_text = text.strip()
            ref_audio = self.voices[voice]["ref_audio"]
@@ -184,6 +194,12 @@ class F5TTS:
            except:
                debug(f'{gen_text=} inference error\n{format_exc()}')

+    async def inference_stream(self, prompt, speaker, speed_factor=1.0):
+        async for d in self._inference_stream(prompt, speaker, speed_factor=speed_factor):
+            audio_b64 = audio_ndarray_to_base64(d['audio'], d['sample_rate'])
+            d['audio'] = audio_b64
+            yield d
+
    def setup_voices(self):
        config = getConfig()
        d = None
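A minimal consumer sketch for the new `inference_stream` wrapper (the driver below is hypothetical; it only assumes, as the code above implies, that each yielded dict carries a base64 `'audio'` string and a `'sample_rate'`):

```python
import asyncio
import base64

async def demo(f5, prompt, speaker='main'):
    # Iterate the async generator and measure each decoded WAV chunk.
    async for d in f5.inference_stream(prompt, speaker):
        wav_bytes = base64.b64decode(d['audio'])
        print(d['sample_rate'], len(wav_bytes), 'bytes')

# Hypothetical wiring; in the server this runs behind stream_response.
# asyncio.run(demo(F5TTS(), 'hello world'))
```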
@@ -204,22 +220,32 @@ class F5TTS:
                'ref_audio':ref_audio
            }

+    def copyfile(self, src, dest):
+        with open(src, 'rb') as f:
+            b = f.read()
+            with open(dest, 'wb') as f1:
+                f1.write(b)
+
    async def add_voice(self, speaker, ref_audio, ref_text):
        debug(f'{speaker=}, {ref_audio=}, {ref_text=}')
        config = getConfig()
        ref_audio = FileStorage().realPath(ref_audio)
+        workdir = get_serverenv('workdir')
+        fn = f'{workdir}/samples/{getID()}.wav'
+        await awaitify(self.copyfile)(ref_audio, fn)
+        os.unlink(ref_audio)
        self.speakers[speaker] = {
            'ref_text':ref_text,
-            'ref_audio':ref_audio
+            'ref_audio':fn
        }
        f = awaitify(preprocess_ref_audio_text)
        ref_audio, ref_text = await f(fn, ref_text)
        self.voices[speaker] = {
            'ref_text':ref_text,
-            'ref_audio':ref_audio
+            'ref_audio':fn
        }
        with codecs.open(config.speakers_file, 'w', 'utf-8') as f:
-            f.write(json.dumps(self.speakers, indent=4))
+            f.write(json.dumps(self.speakers, indent=4, ensure_ascii=False))
        return None
async def _inference(self, prompt, speaker, speed_factor=1.0):
@@ -280,11 +306,13 @@ def test1():
    sleep(36000)
    return {}

f5 = None
def init():
    global f5
    g = ServerEnv()
    f5 = F5TTS()
    g.infer_stream = f5.infer_stream
-    g.inference_stream = f5._inference_stream
+    g.inference_stream = f5.inference_stream
    g.get_speakers = f5.get_speakers
    g.infer = f5._inference
    g.test1 = awaitify(test1)

View File

@@ -1,3 +1,4 @@
numpy
soundfile
cached_path
pycld2

View File

@@ -5,7 +5,7 @@
    "title":"向知识库添加文件",
    "description":"可以添加的文件类型有:文本文件(.txt)、数据文件(.csv)、excel文件(.xlsx, .xls)、word文件(.doc, .docx)、演示文件(.ppt, .pptx)、pdf文件",
    "method":"POST",
-    "submit_url":"{{entire_url('api/add')}}",
+    "submit_url":"{{entire_url('v1/add')}}",
    "fields":[
        {
            "name":"file_path",

View File

@@ -15,7 +15,7 @@
    "title":"添加播音员",
    "method":"POST",
    "description":"通过输入播音员id、录音和录音文字说明来添加播音员",
-    "submit_url":"{{entire_url('/api/addvoice')}}",
+    "submit_url":"{{entire_url('/v1/addvoice')}}",
    "fields":[
        {
            "name":"speaker",

View File

@@ -2,7 +2,7 @@
    "widgettype":"Form",
    "options":{
        "height":"70%",
-        "submit_url":"{{entire_url('api/query')}}",
+        "submit_url":"{{entire_url('v1/query')}}",
        "fields":[
            {
                "name":"prompt",

View File

@@ -12,7 +12,7 @@
    "widgettype":"Form",
    "id":"form",
    "options":{
-        "submit_url":"{{entire_url('/api/inference')}}",
+        "submit_url":"{{entire_url('/v1/inference')}}",
        "fields":[
            {
                "name":"speaker",

View File

@@ -13,7 +13,7 @@
    "id":"form",
    "options":{
        "title":"流式返回",
-        "submit_url":"{{entire_url('/api/infer_stream')}}",
+        "submit_url":"{{entire_url('/v1/infer_stream')}}",
        "fields":[
            {
                "name":"speaker",

wwwroot/v1/index.md Normal file
View File

@@ -0,0 +1,21 @@
# API for F5TTS wrapped web server

We provide the following APIs.

## addvoice
* path: /v1/add_voice
* method: POST
* form data:
  1. ref_text: text
  2. ref_audio: vocal audio
  3. speaker: speaker name for the ref_audio voice

Example:
```
curl .../v1/add_voice \
    -F "speaker=Trump" \
    -F "ref_text=today is a good day" \
    -F "ref_audio=@goodday.wav"
```
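The same call from Python (a sketch using `requests`; the host is a placeholder, adjust it to your deployment):

```python
import requests

# Placeholder host; the server mounts the handler under /v1/add_voice.
url = 'http://localhost:8080/v1/add_voice'
with open('goodday.wav', 'rb') as f:
    r = requests.post(url, data={
        'speaker': 'Trump',
        'ref_text': 'today is a good day',
    }, files={'ref_audio': f})
print(r.status_code, r.text)
```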

wwwroot/v1/index.ui Normal file
View File

@@ -0,0 +1,8 @@
{
    "widgettype":"MdWidget",
    "options":{
        "height":"100%",
        "width":"100%",
        "md_url":"{{entire_url('index.md')}}"
    }
}

View File

@@ -1,7 +1,7 @@
debug(f'{params_kw=}')
async def g():
    speaker = params_kw.speaker or 'main'
-    async for d in infer_stream(params_kw.prompt, speaker):
-        yield entire_url('/idfile') + f'?path={d}'
+    async for d in inference_stream(params_kw.prompt, speaker):
+        yield d
return await stream_response(request, g)
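For completeness, a hedged client sketch for the updated streaming handler (this assumes `stream_response` emits each yielded dict as one JSON object per line; the framing and host are assumptions, not confirmed by the diff):

```python
import base64
import json

import requests

# Assumed endpoint and line-delimited JSON framing.
resp = requests.post('http://localhost:8080/v1/infer_stream',
                     data={'speaker': 'main', 'prompt': 'hello'},
                     stream=True)
for line in resp.iter_lines():
    if not line:
        continue
    d = json.loads(line)
    wav_bytes = base64.b64decode(d['audio'])
    print('chunk:', d.get('sample_rate'), len(wav_bytes), 'bytes')
```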