bugfix
commit f4c8252c52 (parent 31039f2bc7)

app/f5tts.py (44 lines changed)
@@ -9,6 +9,8 @@ import numpy as np
import soundfile as sf
# import tomli
from cached_path import cached_path
from appPublic.textsplit import split_text_with_dialog_preserved
from ahserver.serverenv import get_serverenv
import pycld2 as cld
import cn2an
@@ -133,28 +135,22 @@ class F5TTS:
        return t

    async def split_text(self, text_gen, speaker):
        reg1 = r"(?=\[\w+\])"
        lang = await awaitify(detect_language)(text_gen)
        if self.config.language.get(lang):
            reg1 = r"{}".format(self.config.language.get(lang).sentence_splitter)
        if lang == 'zh':
            text_gen = await awaitify(cn2an.transform)(text_gen, 'an2cn')

        chunks = re.split(reg1, text_gen)
        chunks = split_text_with_dialog_preserved(text_gen)
        # reg2 = self.config.speaker_match
        reg2 = r"\[\[(\w+)\]\]"
        ret = []
        for text in chunks:
            if text == ['\r', '']:
                continue
            lang = await awaitify(detect_language)(text)
            if lang == 'zh':
                text = await awaitify(cn2an.transform)(text, 'an2cn')
            voice = speaker
            match = re.match(reg2, text)
            if match:
                debug(f'{text=}, match {reg2=}')
                voice = match[1]
                if voice not in self.voices:
                    voice = "main"
            debug(f'{text} inferences with speaker({voice})..{reg2=}')
            voice = speaker
            text = re.sub(reg2, "", text)
            gen_text = text.strip()
            ref_audio = self.voices[voice]["ref_audio"]
||||
@@ -184,6 +180,12 @@ class F5TTS:
            except:
                debug(f'{gen_text=} inference error\n{format_exc()}')

    async def inference_stream(self, prompt, speaker, speed_factor=1.0):
        async for d in self._inference_stream(prompt, speaker, speed_factor=speed_factor):
            audio_b64 = sound_ndarray2b64(d['audio'], d['sample_rate'])
            d['audio'] = audio_b64
            yield d

    def setup_voices(self):
        config = getConfig()
        d = None
@@ -204,22 +206,32 @@ class F5TTS:
                'ref_audio':ref_audio
            }

    def copyfile(self, src, dest):
        with open(src, 'rb') as f:
            b = f.read()
        with open(dest, 'wb') as f1:
            f1.write(b)

    async def add_voice(self, speaker, ref_audio, ref_text):
        debug(f'{speaker=}, {ref_audio=}, {ref_text=}')
        config = getConfig()
        ref_audio = FileStorage().realPath(ref_audio)
        workdir = get_serverenv('workdir')
        fn = f'{workdir}/samples/{getID()}.wav'
        await awaitify(self.copyfile)(ref_audio, fn)
        os.unlink(ref_audio)
        self.speakers[speaker] = {
            'ref_text':ref_text,
            'ref_audio':ref_audio
            'ref_audio':fn
        }
        f = awaitify(preprocess_ref_audio_text)
        ref_audio, ref_text = await f(ref_audio, ref_text)
        self.voices[speaker] = {
            'ref_text':ref_text,
            'ref_audio':ref_audio
            'ref_audio':fn
        }
        with codecs.open(config.speakers_file, 'w', 'utf-8') as f:
            f.write(json.dumps(self.speakers, indent=4))
            f.write(json.dumps(self.speakers, indent=4, ensure_ascii=False))
        return None

    async def _inference(self, prompt, speaker, speed_factor=1.0):
@@ -280,11 +292,13 @@ def test1():
    sleep(36000)
    return {}

f5 = None
def init():
    global f5
    g = ServerEnv()
    f5 = F5TTS()
    g.infer_stream = f5.infer_stream
    g.inference_stream = f5._inference_stream
    g.inference_stream = f5.inference_stream
    g.get_speakers = f5.get_speakers
    g.infer = f5._inference
    g.test1 = awaitify(test1)
wwwroot/v1/index.md (new file, 21 lines)
@@ -0,0 +1,21 @@
# API for the F5TTS wrapped web server

We provide the following APIs.

## add_voice

* path: /v1/add_voice
* method: POST
* form data:
  1. ref_text: transcript text of the reference audio
  2. ref_audio: reference vocal audio file
  3. speaker: speaker name to register for the ref_audio voice

Example:

```
curl .../v1/add_voice \
  -F "speaker=Trump" \
  -F "ref_text=today is a good day" \
  -F "ref_audio=@goodday.wav"
```
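
A Python equivalent using the requests library is sketched below. The base URL is a placeholder (the original example elides it), and the multipart form fields simply mirror the curl -F flags above.

```
# Sketch only: BASE_URL is a placeholder, adjust it to your deployment.
import requests

BASE_URL = 'http://localhost:8000'

with open('goodday.wav', 'rb') as f:
    resp = requests.post(
        f'{BASE_URL}/v1/add_voice',
        data={'speaker': 'Trump', 'ref_text': 'today is a good day'},
        files={'ref_audio': f},  # uploaded as multipart form data, like curl -F
    )
print(resp.status_code, resp.text)
```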
wwwroot/v1/index.ui (new file, 8 lines)
@@ -0,0 +1,8 @@
{
    "widgettype":"MdWidget",
    "options":{
        "height":"100%",
        "width":"100%",
        "md_url":"{{entire_url('index.md')}}"
    }
}
@@ -1,7 +1,7 @@
debug(f'{params_kw=}')
async def g():
    speaker = params_kw.speaker or 'main'
    async for d in infer_stream(params_kw.prompt, speaker):
        yield entire_url('/idfile') + f'?path={d}'
    async for d in inference_stream(params_kw.prompt, speaker):
        yield d

return await stream_response(request, g)
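
As a usage note, this handler now streams the dicts produced by inference_stream (base64 audio plus sample rate) instead of /idfile URLs. The sketch below is a hypothetical client, not part of this commit: it assumes the endpoint path, assumes stream_response emits one JSON object per line, and assumes sound_ndarray2b64 encodes a WAV blob; none of these details are shown in the diff.

```
# Hypothetical client sketch (assumptions: endpoint path, JSON-lines framing,
# and base64-encoded WAV payloads; none are confirmed by this diff).
import base64
import io
import json

import requests
import soundfile as sf

def stream_tts(base_url, prompt, speaker='main'):
    with requests.post(f'{base_url}/v1/tts_stream',  # assumed path
                       data={'prompt': prompt, 'speaker': speaker},
                       stream=True) as resp:
        for line in resp.iter_lines():
            if not line:
                continue
            d = json.loads(line)
            raw = base64.b64decode(d['audio'])
            # If the payload is a WAV container, soundfile can decode it directly.
            data, sr = sf.read(io.BytesIO(raw))
            yield data, sr
```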