This commit is contained in:
yumoqing 2025-04-23 15:02:03 +08:00
parent a0569d3d11
commit 05b712262f
2 changed files with 128 additions and 0 deletions

118
app/kokorotts.py Normal file

@@ -0,0 +1,118 @@
import os
import re
import pycld2 as cld
import cn2an
import soundfile as sf
from models import build_model
import torch
from kokoro import generate
from appPublic.log import debug  # debug() is used below; assumed to live in appPublic.log
from appPublic.uniqueID import getID
from appPublic.worker import awaitify
from appPublic.dictObject import DictObject
from appPublic.folderUtils import temp_file
from appPublic.jsonConfig import getConfig
from ahserver.webapp import webapp
from ahserver.serverenv import ServerEnv
from ahserver.filestorage import FileStorage
"""
It looks like you need to clone the repo from
https://huggingface.co/hexgrad/Kokoro-82M
and run this script from inside the repo folder, so that models.py,
kokoro.py, kokoro-v0_19.pth and the voices/ directory are available.
"""
def write_wav_buffer(wav, framerate=24000):
    # Kokoro v0.19 outputs 24 kHz mono audio, so 24000 is assumed as the
    # default sample rate; the caller below passes only the samples.
    fs = FileStorage()
    fn = fs._name2path(f'{getID()}.wav', userid='tmp')
    os.makedirs(os.path.dirname(fn), exist_ok=True)
    debug(fn)
    sf.write(fn, wav, framerate)
    return fs.webpath(fn)

async_write_wav_buffer = awaitify(write_wav_buffer)
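# awaitify() (from appPublic.worker) presumably runs the wrapped blocking call
# in a worker thread and returns an awaitable, so the async handlers below can
# await the file write without blocking the event loop.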
def detect_language(txt):
    isReliable, textBytesFound, details = cld.detect(txt)
    debug(f' detect_language():{isReliable=}, {textBytesFound=}, {details=} ')
    return details[0][1]
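# pycld2.detect() returns (isReliable, textBytesFound, details) where each
# details entry is (languageName, languageCode, percent, score); details[0][1]
# is therefore the ISO code of the most likely language, e.g. 'en' or 'zh'.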
class Kokorotts:
    def __init__(self):
        self.config = getConfig()
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.model = build_model('kokoro-v0_19.pth', self.device)
        self.voices = [
            'af',  # default voice, a 50-50 mix of Bella & Sarah
            'af_bella', 'af_sarah', 'am_adam', 'am_michael',
            'bf_emma', 'bf_isabella', 'bm_george', 'bm_lewis',
            'af_nicole', 'af_sky',
        ]
        # voice name prefixes: 'a*' American English, 'b*' British English;
        # the first letter is later passed to kokoro.generate() as the lang code
        self.loaded_voices = {}
    def load_voice(self, voice):
        if voice not in self.voices:
            raise Exception(f'{voice} is not a valid voice name')
        loadedv = self.loaded_voices.get(voice)
        if loadedv is not None:
            return loadedv
        voicepack = torch.load(f'voices/{voice}.pt', weights_only=True).to(self.device)
        self.loaded_voices[voice] = voicepack
        return voicepack
    async def generate(self, prompt, voice):
        """
        prompt is the text that will be converted to speech,
        voice is the name of the voice to use; unknown names fall
        back to the default voice 'af'.
        Yields the web path of one .wav file per text chunk.
        """
        if voice not in self.voices:
            voice = self.voices[0]
        chunks = await self.split_text(prompt, voice)
        async for a, ps in self.gen_iter(chunks):
            pat = await async_write_wav_buffer(a)
            yield pat
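    # split_text() below also honours inline [[voice_name]] markers, so a
    # single prompt can switch voices between sentences.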
    async def split_text(self, text_gen, speaker):
        reg1 = r"(?=\[\w+\])"
        lang = await awaitify(detect_language)(text_gen)
        if self.config.language.get(lang):
            reg1 = r"{}".format(self.config.language.get(lang).sentence_splitter)
        if lang == 'zh':
            text_gen = await awaitify(cn2an.transform)(text_gen, 'an2cn')
        chunks = re.split(reg1, text_gen)
        reg2 = r"\[\[(\w+)\]\]"
        ret = []
        for text in chunks:
            if text in ('\r', ''):
                continue
            voice = speaker
            match = re.match(reg2, text)
            if match:
                debug(f'{text=}, match {reg2=}')
                voice = match[1]
                if voice not in self.voices:
                    # unknown inline voice names fall back to the caller's speaker
                    voice = speaker
                text = re.sub(reg2, "", text)
            voicepak = await awaitify(self.load_voice)(voice)
            gen_text = text.strip()
            ret.append({'text': gen_text, 'voicepak': voicepak, 'voice': voice})
        return ret
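    # kokoro.generate(model, text, voicepack, lang=...) comes from kokoro.py in
    # the Kokoro-82M repo; it returns the synthesized audio samples together
    # with the phoneme string that was actually used.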
    def tts_generate(self, text, vp, lang):
        return generate(self.model, text, vp, lang=lang)

    async def gen_iter(self, chunks):
        for c in chunks:
            f = awaitify(self.tts_generate)
            # the first letter of the voice name ('a' or 'b') doubles as the
            # language code expected by kokoro.generate()
            audio, out_ps = await f(c['text'], c['voicepak'], c['voice'][0])
            yield audio, out_ps
def init():
    t = Kokorotts()
    g = ServerEnv()
    g.kokoro_generate = t.generate

if __name__ == '__main__':
    webapp(init)
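# A minimal usage sketch (hypothetical, not part of the service itself): once
# webapp(init) is running, another ahserver handler could consume the generator
# registered on ServerEnv, assuming `prompt` and `voice` come from the request:
#
#   async def handle(request):
#       g = ServerEnv()
#       async for wav_path in g.kokoro_generate(prompt, voice):
#           ...  # each wav_path is the web path of one synthesized .wav chunk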

10
requirements.txt Normal file

@@ -0,0 +1,10 @@
# sudo apt-get -y install espeak-ng
phonemizer
torch
transformers
scipy
munch
kokoro
pycld2
cn2an